1/*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/pci.h>
35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/timer.h>
40#include <linux/usb.h>
41#include <linux/usb/ch9.h>
42#include <linux/usb/gadget.h>
43
44#include <asm/byteorder.h>
45#include <asm/system.h>
46#include <asm/unaligned.h>
47
48#include "net2272.h"
49
50#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
51
52static const char driver_name[] = "net2272";
53static const char driver_vers[] = "2006 October 17/mainline";
54static const char driver_desc[] = DRIVER_DESC;
55
56static const char ep0name[] = "ep0";
57static const char * const ep_name[] = {
58 ep0name,
59 "ep-a", "ep-b", "ep-c",
60};
61
62#define DMA_ADDR_INVALID (~(dma_addr_t)0)
63#ifdef CONFIG_USB_GADGET_NET2272_DMA
64/*
65 * use_dma: the NET2272 can use an external DMA controller.
66 * Note that since there is no generic DMA api, some functions,
67 * notably request_dma, start_dma, and cancel_dma will need to be
68 * modified for your platform's particular dma controller.
69 *
70 * If use_dma is disabled, pio will be used instead.
71 */
72static bool use_dma;
73module_param(use_dma, bool, 0644);
74
75/*
76 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
77 * The NET2272 can only use dma for a single endpoint at a time.
78 * At some point this could be modified to allow either endpoint
79 * to take control of dma as it becomes available.
80 *
81 * Note that DMA should not be used on OUT endpoints unless it can
82 * be guaranteed that no short packets will arrive on an IN endpoint
83 * while the DMA operation is pending. Otherwise the OUT DMA will
84 * terminate prematurely (See NET2272 Errata 630-0213-0101)
85 */
86static ushort dma_ep = 1;
87module_param(dma_ep, ushort, 0644);
88
89/*
90 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
91 * mode 0 == Slow DREQ mode
92 * mode 1 == Fast DREQ mode
93 * mode 2 == Burst mode
94 */
95static ushort dma_mode = 2;
96module_param(dma_mode, ushort, 0644);
97#else
98#define use_dma 0
99#define dma_ep 1
100#define dma_mode 2
101#endif
102
103/*
104 * fifo_mode: net2272 buffer configuration:
105 * mode 0 == ep-{a,b,c} 512db each
106 * mode 1 == ep-a 1k, ep-{b,c} 512db
107 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
108 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
109 */
110static ushort fifo_mode = 0;
111module_param(fifo_mode, ushort, 0644);
112
113/*
114 * enable_suspend: When enabled, the driver will respond to
115 * USB suspend requests by powering down the NET2272. Otherwise,
116 * USB suspend requests will be ignored. This is acceptable for
117 * self-powered devices. For bus-powered devices set this to 1.
118 */
119static ushort enable_suspend = 0;
120module_param(enable_suspend, ushort, 0644);
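
/*
 * Example (illustrative only, not from the original source): when the
 * controller is built as a module, the parameters above can be given at
 * load time, e.g.
 *
 *	modprobe net2272 fifo_mode=1 enable_suspend=1
 *
 * and, if CONFIG_USB_GADGET_NET2272_DMA is enabled,
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2
 *
 * Since all of them use permission mode 0644, they are also visible (and
 * writable) under /sys/module/net2272/parameters/ at runtime.
 */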
121
122static void assert_out_naking(struct net2272_ep *ep, const char *where)
123{
124 u8 tmp;
125
126#ifndef DEBUG
127 return;
128#endif
129
130 tmp = net2272_ep_read(ep, EP_STAT0);
131 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
132 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
133 ep->ep.name, where, tmp);
134 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
135 }
136}
137#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
138
139static void stop_out_naking(struct net2272_ep *ep)
140{
141 u8 tmp = net2272_ep_read(ep, EP_STAT0);
142
143 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
144 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
145}
146
147#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
148
149static char *type_string(u8 bmAttributes)
150{
151 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
152 case USB_ENDPOINT_XFER_BULK: return "bulk";
153 case USB_ENDPOINT_XFER_ISOC: return "iso";
154 case USB_ENDPOINT_XFER_INT: return "intr";
155 default: return "control";
156 }
157}
158
159static char *buf_state_string(unsigned state)
160{
161 switch (state) {
162 case BUFF_FREE: return "free";
163 case BUFF_VALID: return "valid";
164 case BUFF_LCL: return "local";
165 case BUFF_USB: return "usb";
166 default: return "unknown";
167 }
168}
169
170static char *dma_mode_string(void)
171{
172 if (!use_dma)
173 return "PIO";
174 switch (dma_mode) {
175 case 0: return "SLOW DREQ";
176 case 1: return "FAST DREQ";
177 case 2: return "BURST";
178 default: return "invalid";
179 }
180}
181
182static void net2272_dequeue_all(struct net2272_ep *);
183static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
184static int net2272_fifo_status(struct usb_ep *);
185
186static struct usb_ep_ops net2272_ep_ops;
187
188/*---------------------------------------------------------------------------*/
189
190static int
191net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
192{
193 struct net2272 *dev;
194 struct net2272_ep *ep;
195 u32 max;
196 u8 tmp;
197 unsigned long flags;
198
199 ep = container_of(_ep, struct net2272_ep, ep);
200 if (!_ep || !desc || ep->desc || _ep->name == ep0name
201 || desc->bDescriptorType != USB_DT_ENDPOINT)
202 return -EINVAL;
203 dev = ep->dev;
204 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
205 return -ESHUTDOWN;
206
207 max = le16_to_cpu(desc->wMaxPacketSize) & 0x1fff;
208
209 spin_lock_irqsave(&dev->lock, flags);
210 _ep->maxpacket = max & 0x7fff;
211 ep->desc = desc;
212
213 /* net2272_ep_reset() has already been called */
214 ep->stopped = 0;
215 ep->wedged = 0;
216
217 /* set speed-dependent max packet */
218 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
219 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
220
221 /* set type, direction, address; reset fifo counters */
222 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
223 tmp = usb_endpoint_type(desc);
224 if (usb_endpoint_xfer_bulk(desc)) {
225 /* catch some particularly blatant driver bugs */
226 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
227 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
228 spin_unlock_irqrestore(&dev->lock, flags);
229 return -ERANGE;
230 }
231 }
232 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
233 tmp <<= ENDPOINT_TYPE;
234 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
235 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
236 tmp |= (1 << ENDPOINT_ENABLE);
237
238 /* for OUT transfers, block the rx fifo until a read is posted */
239 ep->is_in = usb_endpoint_dir_in(desc);
240 if (!ep->is_in)
241 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
242
243 net2272_ep_write(ep, EP_CFG, tmp);
244
245 /* enable irqs */
246 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
247 net2272_write(dev, IRQENB0, tmp);
248
249 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
250 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
251 | net2272_ep_read(ep, EP_IRQENB);
252 net2272_ep_write(ep, EP_IRQENB, tmp);
253
254 tmp = desc->bEndpointAddress;
255 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
256 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
257 type_string(desc->bmAttributes), max,
258 net2272_ep_read(ep, EP_CFG));
259
260 spin_unlock_irqrestore(&dev->lock, flags);
261 return 0;
262}
263
264static void net2272_ep_reset(struct net2272_ep *ep)
265{
266 u8 tmp;
267
268 ep->desc = NULL;
269 INIT_LIST_HEAD(&ep->queue);
270
271 ep->ep.maxpacket = ~0;
272 ep->ep.ops = &net2272_ep_ops;
273
274 /* disable irqs, endpoint */
275 net2272_ep_write(ep, EP_IRQENB, 0);
276
277 /* init to our chosen defaults, notably so that we NAK OUT
278 * packets until the driver queues a read.
279 */
280 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
281 net2272_ep_write(ep, EP_RSPSET, tmp);
282
283 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
284 if (ep->num != 0)
285 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
286
287 net2272_ep_write(ep, EP_RSPCLR, tmp);
288
289 /* scrub most status bits, and flush any fifo state */
290 net2272_ep_write(ep, EP_STAT0,
291 (1 << DATA_IN_TOKEN_INTERRUPT)
292 | (1 << DATA_OUT_TOKEN_INTERRUPT)
293 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
294 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
295 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
296
297 net2272_ep_write(ep, EP_STAT1,
298 (1 << TIMEOUT)
299 | (1 << USB_OUT_ACK_SENT)
300 | (1 << USB_OUT_NAK_SENT)
301 | (1 << USB_IN_ACK_RCVD)
302 | (1 << USB_IN_NAK_SENT)
303 | (1 << USB_STALL_SENT)
304 | (1 << LOCAL_OUT_ZLP)
305 | (1 << BUFFER_FLUSH));
306
307 /* fifo size is handled separately */
308}
309
310static int net2272_disable(struct usb_ep *_ep)
311{
312 struct net2272_ep *ep;
313 unsigned long flags;
314
315 ep = container_of(_ep, struct net2272_ep, ep);
316 if (!_ep || !ep->desc || _ep->name == ep0name)
317 return -EINVAL;
318
319 spin_lock_irqsave(&ep->dev->lock, flags);
320 net2272_dequeue_all(ep);
321 net2272_ep_reset(ep);
322
323 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
324
325 spin_unlock_irqrestore(&ep->dev->lock, flags);
326 return 0;
327}
328
329/*---------------------------------------------------------------------------*/
330
331static struct usb_request *
332net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
333{
334 struct net2272_ep *ep;
335 struct net2272_request *req;
336
337 if (!_ep)
338 return NULL;
339 ep = container_of(_ep, struct net2272_ep, ep);
340
341 req = kzalloc(sizeof(*req), gfp_flags);
342 if (!req)
343 return NULL;
344
345 req->req.dma = DMA_ADDR_INVALID;
346 INIT_LIST_HEAD(&req->queue);
347
348 return &req->req;
349}
350
351static void
352net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
353{
354 struct net2272_ep *ep;
355 struct net2272_request *req;
356
357 ep = container_of(_ep, struct net2272_ep, ep);
358 if (!_ep || !_req)
359 return;
360
361 req = container_of(_req, struct net2272_request, req);
362 WARN_ON(!list_empty(&req->queue));
363 kfree(req);
364}
365
366static void
367net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
368{
369 struct net2272 *dev;
370 unsigned stopped = ep->stopped;
371
372 if (ep->num == 0) {
373 if (ep->dev->protocol_stall) {
374 ep->stopped = 1;
375 set_halt(ep);
376 }
377 allow_status(ep);
378 }
379
380 list_del_init(&req->queue);
381
382 if (req->req.status == -EINPROGRESS)
383 req->req.status = status;
384 else
385 status = req->req.status;
386
387 dev = ep->dev;
388 if (use_dma && req->mapped) {
389 dma_unmap_single(dev->dev, req->req.dma, req->req.length,
390 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
391 req->req.dma = DMA_ADDR_INVALID;
392 req->mapped = 0;
393 }
394
395 if (status && status != -ESHUTDOWN)
396 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
397 ep->ep.name, &req->req, status,
398 req->req.actual, req->req.length, req->req.buf);
399
400 /* don't modify queue heads during completion callback */
401 ep->stopped = 1;
402 spin_unlock(&dev->lock);
403 req->req.complete(&ep->ep, &req->req);
404 spin_lock(&dev->lock);
405 ep->stopped = stopped;
406}
407
408static int
409net2272_write_packet(struct net2272_ep *ep, u8 *buf,
410 struct net2272_request *req, unsigned max)
411{
412 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
413 u16 *bufp;
414 unsigned length, count;
415 u8 tmp;
416
417 length = min(req->req.length - req->req.actual, max);
418 req->req.actual += length;
419
420 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
421 ep->ep.name, req, max, length,
422 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
423
424 count = length;
425 bufp = (u16 *)buf;
426
427 while (likely(count >= 2)) {
428 /* no byte-swap required; chip endian set during init */
429 writew(*bufp++, ep_data);
430 count -= 2;
431 }
432 buf = (u8 *)bufp;
433
434 /* write final byte by placing the NET2272 into 8-bit mode */
435 if (unlikely(count)) {
436 tmp = net2272_read(ep->dev, LOCCTL);
437 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
438 writeb(*buf, ep_data);
439 net2272_write(ep->dev, LOCCTL, tmp);
440 }
441 return length;
442}
443
444/* returns: 0: still running, 1: completed, negative: errno */
445static int
446net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
447{
448 u8 *buf;
449 unsigned count, max;
450 int status;
451
452 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
453 ep->ep.name, req->req.actual, req->req.length);
454
455 /*
456 * Keep loading the endpoint until the final packet is loaded,
457 * or the endpoint buffer is full.
458 */
459 top:
460 /*
461 * Clear interrupt status
462 * - Packet Transmitted interrupt will become set again when the
463 * host successfully takes another packet
464 */
465 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
466 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
467 buf = req->req.buf + req->req.actual;
468 prefetch(buf);
469
470 /* force pagesel */
471 net2272_ep_read(ep, EP_STAT0);
472
473 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
474 (net2272_ep_read(ep, EP_AVAIL0));
475
476 if (max < ep->ep.maxpacket)
477 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
478 | (net2272_ep_read(ep, EP_AVAIL0));
479
480 count = net2272_write_packet(ep, buf, req, max);
481 /* see if we are done */
482 if (req->req.length == req->req.actual) {
483 /* validate short or zlp packet */
484 if (count < ep->ep.maxpacket)
485 set_fifo_bytecount(ep, 0);
486 net2272_done(ep, req, 0);
487
488 if (!list_empty(&ep->queue)) {
489 req = list_entry(ep->queue.next,
490 struct net2272_request,
491 queue);
492 status = net2272_kick_dma(ep, req);
493
494 if (status < 0)
495 if ((net2272_ep_read(ep, EP_STAT0)
496 & (1 << BUFFER_EMPTY)))
497 goto top;
498 }
499 return 1;
500 }
501 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
502 }
503 return 0;
504}
505
506static void
507net2272_out_flush(struct net2272_ep *ep)
508{
509 ASSERT_OUT_NAKING(ep);
510
511 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
512 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
513 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
514}
515
516static int
517net2272_read_packet(struct net2272_ep *ep, u8 *buf,
518 struct net2272_request *req, unsigned avail)
519{
520 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
521 unsigned is_short;
522 u16 *bufp;
523
524 req->req.actual += avail;
525
526 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
527 ep->ep.name, req, avail,
528 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
529
530 is_short = (avail < ep->ep.maxpacket);
531
532 if (unlikely(avail == 0)) {
533 /* remove any zlp from the buffer */
534 (void)readw(ep_data);
535 return is_short;
536 }
537
538 /* Ensure we get the final byte */
539 if (unlikely(avail % 2))
540 avail++;
541 bufp = (u16 *)buf;
542
543 do {
544 *bufp++ = readw(ep_data);
545 avail -= 2;
546 } while (avail);
547
548 /*
549 * To avoid a false endpoint-available race condition, EP_STAT0 must be
550 * read twice in the case of a short transfer
551 */
552 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
553 net2272_ep_read(ep, EP_STAT0);
554
555 return is_short;
556}
557
558static int
559net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
560{
561 u8 *buf;
562 unsigned is_short;
563 int count;
564 int tmp;
565 int cleanup = 0;
566 int status = -1;
567
568 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
569 ep->ep.name, req->req.actual, req->req.length);
570
571 top:
572 do {
573 buf = req->req.buf + req->req.actual;
574 prefetchw(buf);
575
576 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
577 | net2272_ep_read(ep, EP_AVAIL0);
578
579 net2272_ep_write(ep, EP_STAT0,
580 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
581 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
582
583 tmp = req->req.length - req->req.actual;
584
585 if (count > tmp) {
586 if ((tmp % ep->ep.maxpacket) != 0) {
587 dev_err(ep->dev->dev,
588 "%s out fifo %d bytes, expected %d\n",
589 ep->ep.name, count, tmp);
590 cleanup = 1;
591 }
592 count = (tmp > 0) ? tmp : 0;
593 }
594
595 is_short = net2272_read_packet(ep, buf, req, count);
596
597 /* completion */
598 if (unlikely(cleanup || is_short ||
599 ((req->req.actual == req->req.length)
600 && !req->req.zero))) {
601
602 if (cleanup) {
603 net2272_out_flush(ep);
604 net2272_done(ep, req, -EOVERFLOW);
605 } else
606 net2272_done(ep, req, 0);
607
608 /* re-initialize endpoint transfer registers
609 * otherwise they may result in erroneous pre-validation
610 * for subsequent control reads
611 */
612 if (unlikely(ep->num == 0)) {
613 net2272_ep_write(ep, EP_TRANSFER2, 0);
614 net2272_ep_write(ep, EP_TRANSFER1, 0);
615 net2272_ep_write(ep, EP_TRANSFER0, 0);
616 }
617
618 if (!list_empty(&ep->queue)) {
619 req = list_entry(ep->queue.next,
620 struct net2272_request, queue);
621 status = net2272_kick_dma(ep, req);
622 if ((status < 0) &&
623 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
624 goto top;
625 }
626 return 1;
627 }
628 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
629
630 return 0;
631}
632
633static void
634net2272_pio_advance(struct net2272_ep *ep)
635{
636 struct net2272_request *req;
637
638 if (unlikely(list_empty(&ep->queue)))
639 return;
640
641 req = list_entry(ep->queue.next, struct net2272_request, queue);
642 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
643}
644
645/* returns 0 on success, else negative errno */
646static int
647net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
648 unsigned len, unsigned dir)
649{
650 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
651 ep, buf, len, dir);
652
653 /* The NET2272 only supports a single dma channel */
654 if (dev->dma_busy)
655 return -EBUSY;
656 /*
657 * EP_TRANSFER (used to determine the number of bytes received
658 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
659 */
660 if ((dir == 1) && (len > 0x1000000))
661 return -EINVAL;
662
663 dev->dma_busy = 1;
664
665 /* initialize platform's dma */
666#ifdef CONFIG_PCI
667 /* NET2272 addr, buffer addr, length, etc. */
668 switch (dev->dev_id) {
669 case PCI_DEVICE_ID_RDK1:
670 /* Setup PLX 9054 DMA mode */
671 writel((1 << LOCAL_BUS_WIDTH) |
672 (1 << TA_READY_INPUT_ENABLE) |
673 (0 << LOCAL_BURST_ENABLE) |
674 (1 << DONE_INTERRUPT_ENABLE) |
675 (1 << LOCAL_ADDRESSING_MODE) |
676 (1 << DEMAND_MODE) |
677 (1 << DMA_EOT_ENABLE) |
678 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
679 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
680 dev->rdk1.plx9054_base_addr + DMAMODE0);
681
682 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
683 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
684 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
685 writel((dir << DIRECTION_OF_TRANSFER) |
686 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
687 dev->rdk1.plx9054_base_addr + DMADPR0);
688 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
689 readl(dev->rdk1.plx9054_base_addr + INTCSR),
690 dev->rdk1.plx9054_base_addr + INTCSR);
691
692 break;
693 }
694#endif
695
696 net2272_write(dev, DMAREQ,
697 (0 << DMA_BUFFER_VALID) |
698 (1 << DMA_REQUEST_ENABLE) |
699 (1 << DMA_CONTROL_DACK) |
700 (dev->dma_eot_polarity << EOT_POLARITY) |
701 (dev->dma_dack_polarity << DACK_POLARITY) |
702 (dev->dma_dreq_polarity << DREQ_POLARITY) |
703 ((ep >> 1) << DMA_ENDPOINT_SELECT));
704
705 (void) net2272_read(dev, SCRATCH);
706
707 return 0;
708}
709
710static void
711net2272_start_dma(struct net2272 *dev)
712{
713 /* start platform's dma controller */
714#ifdef CONFIG_PCI
715 switch (dev->dev_id) {
716 case PCI_DEVICE_ID_RDK1:
717 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
718 dev->rdk1.plx9054_base_addr + DMACSR0);
719 break;
720 }
721#endif
722}
723
724/* returns 0 on success, else negative errno */
725static int
726net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
727{
728 unsigned size;
729 u8 tmp;
730
731 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
732 return -EINVAL;
733
734 /* don't use dma for odd-length transfers
735 * otherwise, we'd need to deal with the last byte with pio
736 */
737 if (req->req.length & 1)
738 return -EINVAL;
739
740 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
741 ep->ep.name, req, (unsigned long long) req->req.dma);
742
743 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
744
745 /* The NET2272 can only use DMA on one endpoint at a time */
746 if (ep->dev->dma_busy)
747 return -EBUSY;
748
749 /* Make sure we only DMA an even number of bytes (we'll use
750 * pio to complete the transfer)
751 */
752 size = req->req.length;
753 size &= ~1;
754
755 /* device-to-host transfer */
756 if (ep->is_in) {
757 /* initialize platform's dma controller */
758 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
759 /* unable to obtain DMA channel; return error and use pio mode */
760 return -EBUSY;
761 req->req.actual += size;
762
763 /* host-to-device transfer */
764 } else {
765 tmp = net2272_ep_read(ep, EP_STAT0);
766
767 /* initialize platform's dma controller */
768 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
769 /* unable to obtain DMA channel; return error and use pio mode */
770 return -EBUSY;
771
772 if (!(tmp & (1 << BUFFER_EMPTY)))
773 ep->not_empty = 1;
774 else
775 ep->not_empty = 0;
776
777
778 /* allow the endpoint's buffer to fill */
779 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
780
781 /* this transfer completed and data's already in the fifo;
782 * return error so pio gets used.
783 */
784 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
785
786 /* deassert dreq */
787 net2272_write(ep->dev, DMAREQ,
788 (0 << DMA_BUFFER_VALID) |
789 (0 << DMA_REQUEST_ENABLE) |
790 (1 << DMA_CONTROL_DACK) |
791 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
792 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
793 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
794 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
795
796 return -EBUSY;
797 }
798 }
799
800 /* Don't use per-packet interrupts: use dma interrupts only */
801 net2272_ep_write(ep, EP_IRQENB, 0);
802
803 net2272_start_dma(ep->dev);
804
805 return 0;
806}
807
808static void net2272_cancel_dma(struct net2272 *dev)
809{
810#ifdef CONFIG_PCI
811 switch (dev->dev_id) {
812 case PCI_DEVICE_ID_RDK1:
813 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
814 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
815 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
816 (1 << CHANNEL_DONE)))
817 continue; /* wait for dma to stabilize */
818
819 /* dma abort generates an interrupt */
820 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
821 dev->rdk1.plx9054_base_addr + DMACSR0);
822 break;
823 }
824#endif
825
826 dev->dma_busy = 0;
827}
828
829/*---------------------------------------------------------------------------*/
830
831static int
832net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
833{
834 struct net2272_request *req;
835 struct net2272_ep *ep;
836 struct net2272 *dev;
837 unsigned long flags;
838 int status = -1;
839 u8 s;
840
841 req = container_of(_req, struct net2272_request, req);
842 if (!_req || !_req->complete || !_req->buf
843 || !list_empty(&req->queue))
844 return -EINVAL;
845 ep = container_of(_ep, struct net2272_ep, ep);
846 if (!_ep || (!ep->desc && ep->num != 0))
847 return -EINVAL;
848 dev = ep->dev;
849 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
850 return -ESHUTDOWN;
851
852 /* set up dma mapping in case the caller didn't */
853 if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
854 _req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
855 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
856 req->mapped = 1;
857 }
858
859 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
860 _ep->name, _req, _req->length, _req->buf,
861 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
862
863 spin_lock_irqsave(&dev->lock, flags);
864
865 _req->status = -EINPROGRESS;
866 _req->actual = 0;
867
868 /* kickstart this i/o queue? */
869 if (list_empty(&ep->queue) && !ep->stopped) {
870 /* maybe there's no control data, just status ack */
871 if (ep->num == 0 && _req->length == 0) {
872 net2272_done(ep, req, 0);
873 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
874 goto done;
875 }
876
877 /* Return zlp, don't let it block subsequent packets */
878 s = net2272_ep_read(ep, EP_STAT0);
879 if (s & (1 << BUFFER_EMPTY)) {
880 /* Buffer is empty; check for a blocking zlp and handle it */
881 if ((s & (1 << NAK_OUT_PACKETS)) &&
882 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
883 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
884 /*
885 * Request is going to terminate with a short packet ...
886 * hope the client is ready for it!
887 */
888 status = net2272_read_fifo(ep, req);
889 /* clear short packet naking */
890 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
891 goto done;
892 }
893 }
894
895 /* try dma first */
896 status = net2272_kick_dma(ep, req);
897
898 if (status < 0) {
899 /* dma failed (most likely in use by another endpoint);
900 * fall back to pio
901 */
902 status = 0;
903
904 if (ep->is_in)
905 status = net2272_write_fifo(ep, req);
906 else {
907 s = net2272_ep_read(ep, EP_STAT0);
908 if ((s & (1 << BUFFER_EMPTY)) == 0)
909 status = net2272_read_fifo(ep, req);
910 }
911
912 if (unlikely(status != 0)) {
913 if (status > 0)
914 status = 0;
915 req = NULL;
916 }
917 }
918 }
919 if (likely(req != 0))
920 list_add_tail(&req->queue, &ep->queue);
921
922 if (likely(!list_empty(&ep->queue)))
923 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
924 done:
925 spin_unlock_irqrestore(&dev->lock, flags);
926
927 return 0;
928}
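
/*
 * For reference, a gadget driver never calls net2272_queue() directly; it
 * submits requests through the generic gadget API, which dispatches via
 * net2272_ep_ops below.  A minimal sketch of the gadget-side usage
 * (hypothetical code, not part of this driver):
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// invoked from net2272_done()
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * usb_ep_queue() lands in net2272_queue() above, which tries DMA first
 * and falls back to PIO.
 */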
929
930/* dequeue ALL requests */
931static void
932net2272_dequeue_all(struct net2272_ep *ep)
933{
934 struct net2272_request *req;
935
936 /* called with spinlock held */
937 ep->stopped = 1;
938
939 while (!list_empty(&ep->queue)) {
940 req = list_entry(ep->queue.next,
941 struct net2272_request,
942 queue);
943 net2272_done(ep, req, -ESHUTDOWN);
944 }
945}
946
947/* dequeue JUST ONE request */
948static int
949net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
950{
951 struct net2272_ep *ep;
952 struct net2272_request *req;
953 unsigned long flags;
954 int stopped;
955
956 ep = container_of(_ep, struct net2272_ep, ep);
957 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
958 return -EINVAL;
959
960 spin_lock_irqsave(&ep->dev->lock, flags);
961 stopped = ep->stopped;
962 ep->stopped = 1;
963
964 /* make sure it's still queued on this endpoint */
965 list_for_each_entry(req, &ep->queue, queue) {
966 if (&req->req == _req)
967 break;
968 }
969 if (&req->req != _req) {
970 spin_unlock_irqrestore(&ep->dev->lock, flags);
971 return -EINVAL;
972 }
973
974 /* queue head may be partially complete */
975 if (ep->queue.next == &req->queue) {
976 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
977 net2272_done(ep, req, -ECONNRESET);
978 }
979 req = NULL;
980 ep->stopped = stopped;
981
982 spin_unlock_irqrestore(&ep->dev->lock, flags);
983 return 0;
984}
985
986/*---------------------------------------------------------------------------*/
987
988static int
989net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
990{
991 struct net2272_ep *ep;
992 unsigned long flags;
993 int ret = 0;
994
995 ep = container_of(_ep, struct net2272_ep, ep);
996 if (!_ep || (!ep->desc && ep->num != 0))
997 return -EINVAL;
998 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
999 return -ESHUTDOWN;
1000 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
1001 return -EINVAL;
1002
1003 spin_lock_irqsave(&ep->dev->lock, flags);
1004 if (!list_empty(&ep->queue))
1005 ret = -EAGAIN;
1006 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1007 ret = -EAGAIN;
1008 else {
1009 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1010 value ? "set" : "clear",
1011 wedged ? "wedge" : "halt");
1012 /* set/clear */
1013 if (value) {
1014 if (ep->num == 0)
1015 ep->dev->protocol_stall = 1;
1016 else
1017 set_halt(ep);
1018 if (wedged)
1019 ep->wedged = 1;
1020 } else {
1021 clear_halt(ep);
1022 ep->wedged = 0;
1023 }
1024 }
1025 spin_unlock_irqrestore(&ep->dev->lock, flags);
1026
1027 return ret;
1028}
1029
1030static int
1031net2272_set_halt(struct usb_ep *_ep, int value)
1032{
1033 return net2272_set_halt_and_wedge(_ep, value, 0);
1034}
1035
1036static int
1037net2272_set_wedge(struct usb_ep *_ep)
1038{
1039 if (!_ep || _ep->name == ep0name)
1040 return -EINVAL;
1041 return net2272_set_halt_and_wedge(_ep, 1, 1);
1042}
1043
1044static int
1045net2272_fifo_status(struct usb_ep *_ep)
1046{
1047 struct net2272_ep *ep;
1048 u16 avail;
1049
1050 ep = container_of(_ep, struct net2272_ep, ep);
1051 if (!_ep || (!ep->desc && ep->num != 0))
1052 return -ENODEV;
1053 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1054 return -ESHUTDOWN;
1055
1056 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1057 avail |= net2272_ep_read(ep, EP_AVAIL0);
1058 if (avail > ep->fifo_size)
1059 return -EOVERFLOW;
1060 if (ep->is_in)
1061 avail = ep->fifo_size - avail;
1062 return avail;
1063}
1064
1065static void
1066net2272_fifo_flush(struct usb_ep *_ep)
1067{
1068 struct net2272_ep *ep;
1069
1070 ep = container_of(_ep, struct net2272_ep, ep);
1071 if (!_ep || (!ep->desc && ep->num != 0))
1072 return;
1073 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1074 return;
1075
1076 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1077}
1078
1079static struct usb_ep_ops net2272_ep_ops = {
1080 .enable = net2272_enable,
1081 .disable = net2272_disable,
1082
1083 .alloc_request = net2272_alloc_request,
1084 .free_request = net2272_free_request,
1085
1086 .queue = net2272_queue,
1087 .dequeue = net2272_dequeue,
1088
1089 .set_halt = net2272_set_halt,
1090 .set_wedge = net2272_set_wedge,
1091 .fifo_status = net2272_fifo_status,
1092 .fifo_flush = net2272_fifo_flush,
1093};
1094
1095/*---------------------------------------------------------------------------*/
1096
1097static int
1098net2272_get_frame(struct usb_gadget *_gadget)
1099{
1100 struct net2272 *dev;
1101 unsigned long flags;
1102 u16 ret;
1103
1104 if (!_gadget)
1105 return -ENODEV;
1106 dev = container_of(_gadget, struct net2272, gadget);
1107 spin_lock_irqsave(&dev->lock, flags);
1108
1109 ret = net2272_read(dev, FRAME1) << 8;
1110 ret |= net2272_read(dev, FRAME0);
1111
1112 spin_unlock_irqrestore(&dev->lock, flags);
1113 return ret;
1114}
1115
1116static int
1117net2272_wakeup(struct usb_gadget *_gadget)
1118{
1119 struct net2272 *dev;
1120 u8 tmp;
1121 unsigned long flags;
1122
1123 if (!_gadget)
1124 return 0;
1125 dev = container_of(_gadget, struct net2272, gadget);
1126
1127 spin_lock_irqsave(&dev->lock, flags);
1128 tmp = net2272_read(dev, USBCTL0);
1129 if (tmp & (1 << IO_WAKEUP_ENABLE))
1130 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1131
1132 spin_unlock_irqrestore(&dev->lock, flags);
1133
1134 return 0;
1135}
1136
1137static int
1138net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1139{
1140 struct net2272 *dev;
1141
1142 if (!_gadget)
1143 return -ENODEV;
1144 dev = container_of(_gadget, struct net2272, gadget);
1145
1146 dev->is_selfpowered = value;
1147
1148 return 0;
1149}
1150
1151static int
1152net2272_pullup(struct usb_gadget *_gadget, int is_on)
1153{
1154 struct net2272 *dev;
1155 u8 tmp;
1156 unsigned long flags;
1157
1158 if (!_gadget)
1159 return -ENODEV;
1160 dev = container_of(_gadget, struct net2272, gadget);
1161
1162 spin_lock_irqsave(&dev->lock, flags);
1163 tmp = net2272_read(dev, USBCTL0);
1164 dev->softconnect = (is_on != 0);
1165 if (is_on)
1166 tmp |= (1 << USB_DETECT_ENABLE);
1167 else
1168 tmp &= ~(1 << USB_DETECT_ENABLE);
1169 net2272_write(dev, USBCTL0, tmp);
1170 spin_unlock_irqrestore(&dev->lock, flags);
1171
1172 return 0;
1173}
1174
1175static int net2272_start(struct usb_gadget_driver *driver,
1176 int (*bind)(struct usb_gadget *));
1177static int net2272_stop(struct usb_gadget_driver *driver);
1178
1179static const struct usb_gadget_ops net2272_ops = {
1180 .get_frame = net2272_get_frame,
1181 .wakeup = net2272_wakeup,
1182 .set_selfpowered = net2272_set_selfpowered,
1183 .pullup = net2272_pullup,
1184 .start = net2272_start,
1185 .stop = net2272_stop,
1186};
1187
1188/*---------------------------------------------------------------------------*/
1189
1190static ssize_t
1191net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1192{
1193 struct net2272 *dev;
1194 char *next;
1195 unsigned size, t;
1196 unsigned long flags;
1197 u8 t1, t2;
1198 int i;
1199 const char *s;
1200
1201 dev = dev_get_drvdata(_dev);
1202 next = buf;
1203 size = PAGE_SIZE;
1204 spin_lock_irqsave(&dev->lock, flags);
1205
1206 if (dev->driver)
1207 s = dev->driver->driver.name;
1208 else
1209 s = "(none)";
1210
1211 /* Main Control Registers */
1212 t = scnprintf(next, size, "%s version %s,"
1213 "chiprev %02x, locctl %02x\n"
1214 "irqenb0 %02x irqenb1 %02x "
1215 "irqstat0 %02x irqstat1 %02x\n",
1216 driver_name, driver_vers, dev->chiprev,
1217 net2272_read(dev, LOCCTL),
1218 net2272_read(dev, IRQENB0),
1219 net2272_read(dev, IRQENB1),
1220 net2272_read(dev, IRQSTAT0),
1221 net2272_read(dev, IRQSTAT1));
1222 size -= t;
1223 next += t;
1224
1225 /* DMA */
1226 t1 = net2272_read(dev, DMAREQ);
1227 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1228 t1, ep_name[(t1 & 0x01) + 1],
1229 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1230 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1231 t1 & (1 << DMA_REQUEST) ? "req " : "",
1232 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1233 size -= t;
1234 next += t;
1235
1236 /* USB Control Registers */
1237 t1 = net2272_read(dev, USBCTL1);
1238 if (t1 & (1 << VBUS_PIN)) {
1239 if (t1 & (1 << USB_HIGH_SPEED))
1240 s = "high speed";
1241 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1242 s = "powered";
1243 else
1244 s = "full speed";
1245 } else
1246 s = "not attached";
1247 t = scnprintf(next, size,
1248 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1249 net2272_read(dev, USBCTL0), t1,
1250 net2272_read(dev, OURADDR), s);
1251 size -= t;
1252 next += t;
1253
1254 /* Endpoint Registers */
1255 for (i = 0; i < 4; ++i) {
1256 struct net2272_ep *ep;
1257
1258 ep = &dev->ep[i];
1259 if (i && !ep->desc)
1260 continue;
1261
1262 t1 = net2272_ep_read(ep, EP_CFG);
1263 t2 = net2272_ep_read(ep, EP_RSPSET);
1264 t = scnprintf(next, size,
1265 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1266 "irqenb %02x\n",
1267 ep->ep.name, t1, t2,
1268 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1269 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1270 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1271 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1272 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1273 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1274 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1275 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1276 net2272_ep_read(ep, EP_IRQENB));
1277 size -= t;
1278 next += t;
1279
1280 t = scnprintf(next, size,
1281 "\tstat0 %02x stat1 %02x avail %04x "
1282 "(ep%d%s-%s)%s\n",
1283 net2272_ep_read(ep, EP_STAT0),
1284 net2272_ep_read(ep, EP_STAT1),
1285 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1286 t1 & 0x0f,
1287 ep->is_in ? "in" : "out",
1288 type_string(t1 >> 5),
1289 ep->stopped ? "*" : "");
1290 size -= t;
1291 next += t;
1292
1293 t = scnprintf(next, size,
1294 "\tep_transfer %06x\n",
1295 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1296 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1297 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1298 size -= t;
1299 next += t;
1300
1301 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1302 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1303 t = scnprintf(next, size,
1304 "\tbuf-a %s buf-b %s\n",
1305 buf_state_string(t1),
1306 buf_state_string(t2));
1307 size -= t;
1308 next += t;
1309 }
1310
1311 spin_unlock_irqrestore(&dev->lock, flags);
1312
1313 return PAGE_SIZE - size;
1314}
1315static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
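
/*
 * Example (the path is illustrative and depends on how the device is
 * bound): the read-only attribute above lets userspace dump the controller
 * state, e.g.
 *
 *	cat /sys/devices/platform/net2272/registers
 */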
1316
1317/*---------------------------------------------------------------------------*/
1318
1319static void
1320net2272_set_fifo_mode(struct net2272 *dev, int mode)
1321{
1322 u8 tmp;
1323
1324 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1325 tmp |= (mode << 6);
1326 net2272_write(dev, LOCCTL, tmp);
1327
1328 INIT_LIST_HEAD(&dev->gadget.ep_list);
1329
1330 /* always ep-a, ep-c ... maybe not ep-b */
1331 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1332
1333 switch (mode) {
1334 case 0:
1335 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1336 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1337 break;
1338 case 1:
1339 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1340 dev->ep[1].fifo_size = 1024;
1341 dev->ep[2].fifo_size = 512;
1342 break;
1343 case 2:
1344 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1345 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1346 break;
1347 case 3:
1348 dev->ep[1].fifo_size = 1024;
1349 break;
1350 }
1351
1352 /* ep-c always has two 512-byte buffers */
1353 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1354 dev->ep[3].fifo_size = 512;
1355}
1356
1357/*---------------------------------------------------------------------------*/
1358
1359static struct net2272 *the_controller;
1360
1361static void
1362net2272_usb_reset(struct net2272 *dev)
1363{
1364 dev->gadget.speed = USB_SPEED_UNKNOWN;
1365
1366 net2272_cancel_dma(dev);
1367
1368 net2272_write(dev, IRQENB0, 0);
1369 net2272_write(dev, IRQENB1, 0);
1370
1371 /* clear irq state */
1372 net2272_write(dev, IRQSTAT0, 0xff);
1373 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1374
1375 net2272_write(dev, DMAREQ,
1376 (0 << DMA_BUFFER_VALID) |
1377 (0 << DMA_REQUEST_ENABLE) |
1378 (1 << DMA_CONTROL_DACK) |
1379 (dev->dma_eot_polarity << EOT_POLARITY) |
1380 (dev->dma_dack_polarity << DACK_POLARITY) |
1381 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1382 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1383
1384 net2272_cancel_dma(dev);
1385 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1386
1387 /* Set the NET2272 ep fifo data width to 16-bit mode. For correct byte order,
1388 * note that the higher-level gadget drivers are expected to convert data to little endian;
1389 * enable byte swapping for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1390 */
1391 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1392 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1393}
1394
1395static void
1396net2272_usb_reinit(struct net2272 *dev)
1397{
1398 int i;
1399
1400 /* basic endpoint init */
1401 for (i = 0; i < 4; ++i) {
1402 struct net2272_ep *ep = &dev->ep[i];
1403
1404 ep->ep.name = ep_name[i];
1405 ep->dev = dev;
1406 ep->num = i;
1407 ep->not_empty = 0;
1408
1409 if (use_dma && ep->num == dma_ep)
1410 ep->dma = 1;
1411
1412 if (i > 0 && i <= 3)
1413 ep->fifo_size = 512;
1414 else
1415 ep->fifo_size = 64;
1416 net2272_ep_reset(ep);
1417 }
1418 dev->ep[0].ep.maxpacket = 64;
1419
1420 dev->gadget.ep0 = &dev->ep[0].ep;
1421 dev->ep[0].stopped = 0;
1422 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1423}
1424
1425static void
1426net2272_ep0_start(struct net2272 *dev)
1427{
1428 struct net2272_ep *ep0 = &dev->ep[0];
1429
1430 net2272_ep_write(ep0, EP_RSPSET,
1431 (1 << NAK_OUT_PACKETS_MODE) |
1432 (1 << ALT_NAK_OUT_PACKETS));
1433 net2272_ep_write(ep0, EP_RSPCLR,
1434 (1 << HIDE_STATUS_PHASE) |
1435 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1436 net2272_write(dev, USBCTL0,
1437 (dev->softconnect << USB_DETECT_ENABLE) |
1438 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1439 (1 << IO_WAKEUP_ENABLE));
1440 net2272_write(dev, IRQENB0,
1441 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1442 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1443 (1 << DMA_DONE_INTERRUPT_ENABLE));
1444 net2272_write(dev, IRQENB1,
1445 (1 << VBUS_INTERRUPT_ENABLE) |
1446 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1447 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1448}
1449
1450/* when a driver is successfully registered, it will receive
1451 * control requests including set_configuration(), which enables
1452 * non-control requests. then usb traffic follows until a
1453 * disconnect is reported. then a host may connect again, or
1454 * the driver might get unbound.
1455 */
1456static int net2272_start(struct usb_gadget_driver *driver,
1457 int (*bind)(struct usb_gadget *))
1458{
1459 struct net2272 *dev = the_controller;
1460 int ret;
1461 unsigned i;
1462
1463 if (!driver || !bind || !driver->unbind || !driver->setup ||
1464 driver->speed != USB_SPEED_HIGH)
1465 return -EINVAL;
1466 if (!dev)
1467 return -ENODEV;
1468 if (dev->driver)
1469 return -EBUSY;
1470
1471 for (i = 0; i < 4; ++i)
1472 dev->ep[i].irqs = 0;
1473 /* hook up the driver ... */
1474 dev->softconnect = 1;
1475 driver->driver.bus = NULL;
1476 dev->driver = driver;
1477 dev->gadget.dev.driver = &driver->driver;
1478 ret = bind(&dev->gadget);
1479 if (ret) {
1480 dev_dbg(dev->dev, "bind to driver %s --> %d\n",
1481 driver->driver.name, ret);
1482 dev->driver = NULL;
1483 dev->gadget.dev.driver = NULL;
1484 return ret;
1485 }
1486
1487 /* ... then enable host detection and ep0; and we're ready
1488 * for set_configuration as well as eventual disconnect.
1489 */
1490 net2272_ep0_start(dev);
1491
1492 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1493
1494 return 0;
1495}
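
/*
 * In this kernel generation net2272_start() is not called directly by a
 * gadget driver; it is reached through udc-core when the gadget driver
 * registers itself.  A sketch of the gadget-side registration (hypothetical
 * code, not part of this driver; the exact API depends on the kernel
 * version):
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.speed		= USB_SPEED_HIGH,
 *		.unbind		= my_unbind,
 *		.setup		= my_setup,
 *		.disconnect	= my_disconnect,
 *		.driver		= { .name = "my_gadget" },
 *	};
 *
 *	retval = usb_gadget_probe_driver(&my_driver, my_bind);
 *
 * The bind callback is passed separately because net2272_start() (the
 * .start op registered in net2272_ops above) takes it as a second argument.
 */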
1496
1497static void
1498stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1499{
1500 int i;
1501
1502 /* don't disconnect if it's not connected */
1503 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1504 driver = NULL;
1505
1506 /* stop hardware; prevent new request submissions;
1507 * and kill any outstanding requests.
1508 */
1509 net2272_usb_reset(dev);
1510 for (i = 0; i < 4; ++i)
1511 net2272_dequeue_all(&dev->ep[i]);
1512
1513 /* report disconnect; the driver is already quiesced */
1514 if (driver) {
1515 spin_unlock(&dev->lock);
1516 driver->disconnect(&dev->gadget);
1517 spin_lock(&dev->lock);
1518
1519 }
1520 net2272_usb_reinit(dev);
1521}
1522
1523static int net2272_stop(struct usb_gadget_driver *driver)
1524{
1525 struct net2272 *dev = the_controller;
1526 unsigned long flags;
1527
1528 if (!dev)
1529 return -ENODEV;
1530 if (!driver || driver != dev->driver)
1531 return -EINVAL;
1532
1533 spin_lock_irqsave(&dev->lock, flags);
1534 stop_activity(dev, driver);
1535 spin_unlock_irqrestore(&dev->lock, flags);
1536
1537 net2272_pullup(&dev->gadget, 0);
1538
1539 driver->unbind(&dev->gadget);
1540 dev->gadget.dev.driver = NULL;
1541 dev->driver = NULL;
1542
1543 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1544 return 0;
1545}
1546
1547/*---------------------------------------------------------------------------*/
1548/* handle ep-a/ep-b dma completions */
1549static void
1550net2272_handle_dma(struct net2272_ep *ep)
1551{
1552 struct net2272_request *req;
1553 unsigned len;
1554 int status;
1555
1556 if (!list_empty(&ep->queue))
1557 req = list_entry(ep->queue.next,
1558 struct net2272_request, queue);
1559 else
1560 req = NULL;
1561
1562 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1563
1564 /* Ensure DREQ is de-asserted */
1565 net2272_write(ep->dev, DMAREQ,
1566 (0 << DMA_BUFFER_VALID)
1567 | (0 << DMA_REQUEST_ENABLE)
1568 | (1 << DMA_CONTROL_DACK)
1569 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1570 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1571 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1572 | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
1573
1574 ep->dev->dma_busy = 0;
1575
1576 net2272_ep_write(ep, EP_IRQENB,
1577 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1578 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1579 | net2272_ep_read(ep, EP_IRQENB));
1580
1581 /* device-to-host transfer completed */
1582 if (ep->is_in) {
1583 /* validate a short packet or zlp if necessary */
1584 if ((req->req.length % ep->ep.maxpacket != 0) ||
1585 req->req.zero)
1586 set_fifo_bytecount(ep, 0);
1587
1588 net2272_done(ep, req, 0);
1589 if (!list_empty(&ep->queue)) {
1590 req = list_entry(ep->queue.next,
1591 struct net2272_request, queue);
1592 status = net2272_kick_dma(ep, req);
1593 if (status < 0)
1594 net2272_pio_advance(ep);
1595 }
1596
1597 /* host-to-device transfer completed */
1598 } else {
1599 /* terminated with a short packet? */
1600 if (net2272_read(ep->dev, IRQSTAT0) &
1601 (1 << DMA_DONE_INTERRUPT)) {
1602 /* abort system dma */
1603 net2272_cancel_dma(ep->dev);
1604 }
1605
1606 /* EP_TRANSFER will contain the number of bytes
1607 * actually received.
1608 * NOTE: There is no overflow detection on EP_TRANSFER:
1609 * We can't deal with transfers larger than 2^24 bytes!
1610 */
1611 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1612 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1613 | (net2272_ep_read(ep, EP_TRANSFER0));
1614
1615 if (ep->not_empty)
1616 len += 4;
1617
1618 req->req.actual += len;
1619
1620 /* get any remaining data */
1621 net2272_pio_advance(ep);
1622 }
1623}
1624
1625/*---------------------------------------------------------------------------*/
1626
1627static void
1628net2272_handle_ep(struct net2272_ep *ep)
1629{
1630 struct net2272_request *req;
1631 u8 stat0, stat1;
1632
1633 if (!list_empty(&ep->queue))
1634 req = list_entry(ep->queue.next,
1635 struct net2272_request, queue);
1636 else
1637 req = NULL;
1638
1639 /* ack all, and handle what we care about */
1640 stat0 = net2272_ep_read(ep, EP_STAT0);
1641 stat1 = net2272_ep_read(ep, EP_STAT1);
1642 ep->irqs++;
1643
1644 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1645 ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1646
1647 net2272_ep_write(ep, EP_STAT0, stat0 &
1648 ~((1 << NAK_OUT_PACKETS)
1649 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1650 net2272_ep_write(ep, EP_STAT1, stat1);
1651
1652 /* data packet(s) received (in the fifo, OUT)
1653 * direction must be validated, otherwise control read status phase
1654 * could be interpreted as a valid packet
1655 */
1656 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1657 net2272_pio_advance(ep);
1658 /* data packet(s) transmitted (IN) */
1659 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1660 net2272_pio_advance(ep);
1661}
1662
1663static struct net2272_ep *
1664net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1665{
1666 struct net2272_ep *ep;
1667
1668 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1669 return &dev->ep[0];
1670
1671 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1672 u8 bEndpointAddress;
1673
1674 if (!ep->desc)
1675 continue;
1676 bEndpointAddress = ep->desc->bEndpointAddress;
1677 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1678 continue;
1679 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1680 return ep;
1681 }
1682 return NULL;
1683}
1684
1685/*
1686 * USB Test Packet:
1687 * JKJKJKJK * 9
1688 * JJKKJJKK * 8
1689 * JJJJKKKK * 8
1690 * JJJJJJJKKKKKKK * 8
1691 * JJJJJJJK * 8
1692 * {JKKKKKKK * 10}, JK
1693 */
1694static const u8 net2272_test_packet[] = {
1695 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1696 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1697 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1698 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1699 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1700 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1701};
1702
1703static void
1704net2272_set_test_mode(struct net2272 *dev, int mode)
1705{
1706 int i;
1707
1708 /* Disable all net2272 interrupts:
1709 * Nothing but a power cycle should stop the test.
1710 */
1711 net2272_write(dev, IRQENB0, 0x00);
1712 net2272_write(dev, IRQENB1, 0x00);
1713
1714 /* Force transceiver to high-speed */
1715 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1716
1717 net2272_write(dev, PAGESEL, 0);
1718 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1719 net2272_write(dev, EP_RSPCLR,
1720 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1721 | (1 << HIDE_STATUS_PHASE));
1722 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1723 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1724
1725 /* wait for status phase to complete */
1726 while (!(net2272_read(dev, EP_STAT0) &
1727 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1728 ;
1729
1730 /* Enable test mode */
1731 net2272_write(dev, USBTEST, mode);
1732
1733 /* load test packet */
1734 if (mode == TEST_PACKET) {
1735 /* switch to 8 bit mode */
1736 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1737 ~(1 << DATA_WIDTH));
1738
1739 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1740 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1741
1742 /* Validate test packet */
1743 net2272_write(dev, EP_TRANSFER0, 0);
1744 }
1745}
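
/*
 * Test modes are normally entered in response to a standard
 * SET_FEATURE(TEST_MODE) control request, with the test selector carried in
 * the high byte of wIndex; see the USB_REQ_SET_FEATURE handling in
 * net2272_handle_stat0_irqs() below, which calls this function.
 */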
1746
1747static void
1748net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1749{
1750 struct net2272_ep *ep;
1751 u8 num, scratch;
1752
1753 /* starting a control request? */
1754 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1755 union {
1756 u8 raw[8];
1757 struct usb_ctrlrequest r;
1758 } u;
1759 int tmp = 0;
1760 struct net2272_request *req;
1761
1762 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1763 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1764 dev->gadget.speed = USB_SPEED_HIGH;
1765 else
1766 dev->gadget.speed = USB_SPEED_FULL;
1767 dev_dbg(dev->dev, "%s speed\n",
1768 (dev->gadget.speed == USB_SPEED_HIGH) ? "high" : "full");
1769 }
1770
1771 ep = &dev->ep[0];
1772 ep->irqs++;
1773
1774 /* make sure any leftover interrupt state is cleared */
1775 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1776 while (!list_empty(&ep->queue)) {
1777 req = list_entry(ep->queue.next,
1778 struct net2272_request, queue);
1779 net2272_done(ep, req,
1780 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1781 }
1782 ep->stopped = 0;
1783 dev->protocol_stall = 0;
1784 net2272_ep_write(ep, EP_STAT0,
1785 (1 << DATA_IN_TOKEN_INTERRUPT)
1786 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1787 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1788 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1789 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1790 net2272_ep_write(ep, EP_STAT1,
1791 (1 << TIMEOUT)
1792 | (1 << USB_OUT_ACK_SENT)
1793 | (1 << USB_OUT_NAK_SENT)
1794 | (1 << USB_IN_ACK_RCVD)
1795 | (1 << USB_IN_NAK_SENT)
1796 | (1 << USB_STALL_SENT)
1797 | (1 << LOCAL_OUT_ZLP));
1798
1799 /*
1800 * Ensure Control Read pre-validation setting is beyond maximum size
1801 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1802 * an EP0 transfer following the Control Write is a Control Read,
1803 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1804 * pre-validation count.
1805 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1806 * the pre-validation count cannot cause an unexpected validation
1807 */
1808 net2272_write(dev, PAGESEL, 0);
1809 net2272_write(dev, EP_TRANSFER2, 0xff);
1810 net2272_write(dev, EP_TRANSFER1, 0xff);
1811 net2272_write(dev, EP_TRANSFER0, 0xff);
1812
1813 u.raw[0] = net2272_read(dev, SETUP0);
1814 u.raw[1] = net2272_read(dev, SETUP1);
1815 u.raw[2] = net2272_read(dev, SETUP2);
1816 u.raw[3] = net2272_read(dev, SETUP3);
1817 u.raw[4] = net2272_read(dev, SETUP4);
1818 u.raw[5] = net2272_read(dev, SETUP5);
1819 u.raw[6] = net2272_read(dev, SETUP6);
1820 u.raw[7] = net2272_read(dev, SETUP7);
1821 /*
1822 * If you have a big endian cpu make sure le16_to_cpus
1823 * performs the proper byte swapping here...
1824 */
1825 le16_to_cpus(&u.r.wValue);
1826 le16_to_cpus(&u.r.wIndex);
1827 le16_to_cpus(&u.r.wLength);
1828
1829 /* ack the irq */
1830 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1831 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1832
1833 /* watch control traffic at the token level, and force
1834 * synchronization before letting the status phase happen.
1835 */
1836 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1837 if (ep->is_in) {
1838 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1839 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1840 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1841 stop_out_naking(ep);
1842 } else
1843 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1844 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1845 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1846 net2272_ep_write(ep, EP_IRQENB, scratch);
1847
1848 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1849 goto delegate;
1850 switch (u.r.bRequest) {
1851 case USB_REQ_GET_STATUS: {
1852 struct net2272_ep *e;
1853 u16 status = 0;
1854
1855 switch (u.r.bRequestType & USB_RECIP_MASK) {
1856 case USB_RECIP_ENDPOINT:
1857 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1858 if (!e || u.r.wLength > 2)
1859 goto do_stall;
1860 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1861 status = __constant_cpu_to_le16(1);
1862 else
1863 status = __constant_cpu_to_le16(0);
1864
1865 /* don't bother with a request object! */
1866 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1867 writew(status, net2272_reg_addr(dev, EP_DATA));
1868 set_fifo_bytecount(&dev->ep[0], 0);
1869 allow_status(ep);
1870 dev_vdbg(dev->dev, "%s stat %02x\n",
1871 ep->ep.name, status);
1872 goto next_endpoints;
1873 case USB_RECIP_DEVICE:
1874 if (u.r.wLength > 2)
1875 goto do_stall;
1876 if (dev->is_selfpowered)
1877 status = (1 << USB_DEVICE_SELF_POWERED);
1878
1879 /* don't bother with a request object! */
1880 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1881 writew(status, net2272_reg_addr(dev, EP_DATA));
1882 set_fifo_bytecount(&dev->ep[0], 0);
1883 allow_status(ep);
1884 dev_vdbg(dev->dev, "device stat %02x\n", status);
1885 goto next_endpoints;
1886 case USB_RECIP_INTERFACE:
1887 if (u.r.wLength > 2)
1888 goto do_stall;
1889
1890 /* don't bother with a request object! */
1891 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1892 writew(status, net2272_reg_addr(dev, EP_DATA));
1893 set_fifo_bytecount(&dev->ep[0], 0);
1894 allow_status(ep);
1895 dev_vdbg(dev->dev, "interface status %02x\n", status);
1896 goto next_endpoints;
1897 }
1898
1899 break;
1900 }
1901 case USB_REQ_CLEAR_FEATURE: {
1902 struct net2272_ep *e;
1903
1904 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1905 goto delegate;
1906 if (u.r.wValue != USB_ENDPOINT_HALT ||
1907 u.r.wLength != 0)
1908 goto do_stall;
1909 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1910 if (!e)
1911 goto do_stall;
1912 if (e->wedged) {
1913 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1914 ep->ep.name);
1915 } else {
1916 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1917 clear_halt(e);
1918 }
1919 allow_status(ep);
1920 goto next_endpoints;
1921 }
1922 case USB_REQ_SET_FEATURE: {
1923 struct net2272_ep *e;
1924
1925 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1926 if (u.r.wIndex != NORMAL_OPERATION)
1927 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1928 allow_status(ep);
1929 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1930 goto next_endpoints;
1931 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1932 goto delegate;
1933 if (u.r.wValue != USB_ENDPOINT_HALT ||
1934 u.r.wLength != 0)
1935 goto do_stall;
1936 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1937 if (!e)
1938 goto do_stall;
1939 set_halt(e);
1940 allow_status(ep);
1941 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1942 goto next_endpoints;
1943 }
1944 case USB_REQ_SET_ADDRESS: {
1945 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1946 allow_status(ep);
1947 break;
1948 }
1949 default:
1950 delegate:
1951 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1952 "ep_cfg %08x\n",
1953 u.r.bRequestType, u.r.bRequest,
1954 u.r.wValue, u.r.wIndex,
1955 net2272_ep_read(ep, EP_CFG));
1956 spin_unlock(&dev->lock);
1957 tmp = dev->driver->setup(&dev->gadget, &u.r);
1958 spin_lock(&dev->lock);
1959 }
1960
1961 /* stall ep0 on error */
1962 if (tmp < 0) {
1963 do_stall:
1964 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1965 u.r.bRequestType, u.r.bRequest, tmp);
1966 dev->protocol_stall = 1;
1967 }
1968 /* endpoint dma irq? */
1969 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1970 net2272_cancel_dma(dev);
1971 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1972 stat &= ~(1 << DMA_DONE_INTERRUPT);
1973 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1974 ? 2 : 1;
1975
1976 ep = &dev->ep[num];
1977 net2272_handle_dma(ep);
1978 }
1979
1980 next_endpoints:
1981 /* endpoint data irq? */
1982 scratch = stat & 0x0f;
1983 stat &= ~0x0f;
1984 for (num = 0; scratch; num++) {
1985 u8 t;
1986
1987 /* does this endpoint's FIFO and queue need tending? */
1988 t = 1 << num;
1989 if ((scratch & t) == 0)
1990 continue;
1991 scratch ^= t;
1992
1993 ep = &dev->ep[num];
1994 net2272_handle_ep(ep);
1995 }
1996
1997 /* some interrupts we can just ignore */
1998 stat &= ~(1 << SOF_INTERRUPT);
1999
2000 if (stat)
2001 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
2002}
2003
2004static void
2005net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
2006{
2007 u8 tmp, mask;
2008
2009 /* after disconnect there's nothing else to do! */
2010 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
2011 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
2012
2013 if (stat & tmp) {
2014 net2272_write(dev, IRQSTAT1, tmp);
2015 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2016 ((net2272_read(dev, USBCTL1) & mask) == 0))
2017 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
2018 == 0))
2019 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
2020 dev_dbg(dev->dev, "disconnect %s\n",
2021 dev->driver->driver.name);
2022 stop_activity(dev, dev->driver);
2023 net2272_ep0_start(dev);
2024 return;
2025 }
2026 stat &= ~tmp;
2027
2028 if (!stat)
2029 return;
2030 }
2031
2032 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2033 if (stat & tmp) {
2034 net2272_write(dev, IRQSTAT1, tmp);
2035 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2036 if (dev->driver->suspend)
2037 dev->driver->suspend(&dev->gadget);
2038 if (!enable_suspend) {
2039 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2040 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2041 }
2042 } else {
2043 if (dev->driver->resume)
2044 dev->driver->resume(&dev->gadget);
2045 }
2046 stat &= ~tmp;
2047 }
2048
2049 /* clear any other status/irqs */
2050 if (stat)
2051 net2272_write(dev, IRQSTAT1, stat);
2052
2053 /* some status we can just ignore */
2054 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2055 | (1 << SUSPEND_REQUEST_INTERRUPT)
2056 | (1 << RESUME_INTERRUPT));
2057 if (!stat)
2058 return;
2059 else
2060 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2061}
2062
2063static irqreturn_t net2272_irq(int irq, void *_dev)
2064{
2065 struct net2272 *dev = _dev;
2066#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2067 u32 intcsr;
2068#endif
2069#if defined(PLX_PCI_RDK)
2070 u8 dmareq;
2071#endif
2072 spin_lock(&dev->lock);
2073#if defined(PLX_PCI_RDK)
2074 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2075
2076 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2077 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2078 dev->rdk1.plx9054_base_addr + INTCSR);
2079 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2080 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2081 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2082 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2083 dev->rdk1.plx9054_base_addr + INTCSR);
2084 }
2085 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2086 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2087 dev->rdk1.plx9054_base_addr + DMACSR0);
2088
2089 dmareq = net2272_read(dev, DMAREQ);
2090 if (dmareq & 0x01)
2091 net2272_handle_dma(&dev->ep[2]);
2092 else
2093 net2272_handle_dma(&dev->ep[1]);
2094 }
2095#endif
2096#if defined(PLX_PCI_RDK2)
	/* see if this PCI interrupt is ours by checking irqstat */
	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}
2101 /* check dma interrupts */
2102#endif
	/* Platform/device interrupt handler */
2104#if !defined(PLX_PCI_RDK)
2105 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2106 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2107#endif
2108 spin_unlock(&dev->lock);
2109
2110 return IRQ_HANDLED;
2111}
2112
2113static int net2272_present(struct net2272 *dev)
2114{
	/*
	 * Quick test to see if the CPU can communicate properly with the NET2272.
	 * Verifies the connection using writes and reads of both a read/write
	 * register (SCRATCH) and a read-only register (CHIPREV).
	 *
	 * This routine is strongly recommended, especially during early bring-up
	 * of new hardware; however, for designs that do not run Power-On Self
	 * Tests (POST) it may be discarded (or perhaps minimized).
	 */
2124 unsigned int ii;
2125 u8 val, refval;
2126
	/* Verify that the NET2272 SCRATCH register can be written and read back */
2128 refval = net2272_read(dev, SCRATCH);
2129 for (ii = 0; ii < 0x100; ii += 7) {
2130 net2272_write(dev, SCRATCH, ii);
2131 val = net2272_read(dev, SCRATCH);
2132 if (val != ii) {
2133 dev_dbg(dev->dev,
2134 "%s: write/read SCRATCH register test failed: "
2135 "wrote:0x%2.2x, read:0x%2.2x\n",
2136 __func__, ii, val);
2137 return -EINVAL;
2138 }
2139 }
2140 /* To be nice, we write the original SCRATCH value back: */
2141 net2272_write(dev, SCRATCH, refval);
2142
2143 /* Verify NET2272 CHIPREV register is read-only: */
2144 refval = net2272_read(dev, CHIPREV_2272);
2145 for (ii = 0; ii < 0x100; ii += 7) {
2146 net2272_write(dev, CHIPREV_2272, ii);
2147 val = net2272_read(dev, CHIPREV_2272);
2148 if (val != refval) {
2149 dev_dbg(dev->dev,
2150 "%s: write/read CHIPREV register test failed: "
2151 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2152 __func__, ii, val, refval);
2153 return -EINVAL;
2154 }
2155 }
2156
	/*
	 * Verify NET2272's "NET2270 legacy revision" register
	 *  - The NET2272 has two revision registers. The NET2270 legacy revision
	 *    register should read the same value regardless of the NET2272
	 *    silicon revision. The legacy register exists so that firmware
	 *    written for the NET2270 can also run on the NET2272.
	 */
2164 val = net2272_read(dev, CHIPREV_LEGACY);
2165 if (val != NET2270_LEGACY_REV) {
2166 /*
2167 * Unexpected legacy revision value
2168 * - Perhaps the chip is a NET2270?
2169 */
2170 dev_dbg(dev->dev,
2171 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2172 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2173 __func__, NET2270_LEGACY_REV, val);
2174 return -EINVAL;
2175 }
2176
2177 /*
2178 * Verify NET2272 silicon revision
2179 * - This revision register is appropriate for the silicon version
2180 * of the NET2272
2181 */
2182 val = net2272_read(dev, CHIPREV_2272);
2183 switch (val) {
2184 case CHIPREV_NET2272_R1:
		/*
		 * NET2272 Rev 1 has DMA-related errata:
		 *  - Newer silicon (Rev 1A or later) is required for reliable DMA
		 */
2189 dev_dbg(dev->dev,
2190 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2191 __func__);
2192 break;
2193 case CHIPREV_NET2272_R1A:
2194 break;
2195 default:
2196 /* NET2272 silicon version *may* not work with this firmware */
2197 dev_dbg(dev->dev,
2198 "%s: unexpected silicon revision register value: "
2199 " CHIPREV_2272: 0x%2.2x\n",
2200 __func__, val);
2201 /*
2202 * Return Success, even though the chip rev is not an expected value
2203 * - Older, pre-built firmware can attempt to operate on newer silicon
2204 * - Often, new silicon is perfectly compatible
2205 */
2206 }
2207
2208 /* Success: NET2272 checks out OK */
2209 return 0;
2210}
2211
2212static void
2213net2272_gadget_release(struct device *_dev)
2214{
2215 struct net2272 *dev = dev_get_drvdata(_dev);
2216 kfree(dev);
2217}
2218
2219/*---------------------------------------------------------------------------*/
2220
2221static void __devexit
2222net2272_remove(struct net2272 *dev)
2223{
2224 usb_del_gadget_udc(&dev->gadget);
2225
2226 /* start with the driver above us */
2227 if (dev->driver) {
2228 /* should have been done already by driver model core */
2229 dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
2230 dev->driver->driver.name);
2231 usb_gadget_unregister_driver(dev->driver);
2232 }
2233
2234 free_irq(dev->irq, dev);
2235 iounmap(dev->base_addr);
2236
2237 device_unregister(&dev->gadget.dev);
2238 device_remove_file(dev->dev, &dev_attr_registers);
2239
2240 dev_info(dev->dev, "unbind\n");
2241 the_controller = NULL;
2242}
2243
2244static struct net2272 * __devinit
2245net2272_probe_init(struct device *dev, unsigned int irq)
2246{
2247 struct net2272 *ret;
2248
2249 if (the_controller) {
		dev_warn(dev, "a NET2272 is already bound; ignoring this device\n");
2251 return ERR_PTR(-EBUSY);
2252 }
2253
2254 if (!irq) {
2255 dev_dbg(dev, "No IRQ!\n");
2256 return ERR_PTR(-ENODEV);
2257 }
2258
2259 /* alloc, and start init */
2260 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2261 if (!ret)
2262 return ERR_PTR(-ENOMEM);
2263
2264 spin_lock_init(&ret->lock);
2265 ret->irq = irq;
2266 ret->dev = dev;
2267 ret->gadget.ops = &net2272_ops;
2268 ret->gadget.is_dualspeed = 1;
2269
2270 /* the "gadget" abstracts/virtualizes the controller */
2271 dev_set_name(&ret->gadget.dev, "gadget");
2272 ret->gadget.dev.parent = dev;
2273 ret->gadget.dev.dma_mask = dev->dma_mask;
2274 ret->gadget.dev.release = net2272_gadget_release;
2275 ret->gadget.name = driver_name;
2276
2277 return ret;
2278}
2279
2280static int __devinit
2281net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2282{
2283 int ret;
2284
	/* See if the controller is actually there */
2286 if (net2272_present(dev)) {
2287 dev_warn(dev->dev, "2272 not found!\n");
2288 ret = -ENODEV;
2289 goto err;
2290 }
2291
2292 net2272_usb_reset(dev);
2293 net2272_usb_reinit(dev);
2294
2295 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2296 if (ret) {
2297 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2298 goto err;
2299 }
2300
2301 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2302
2303 /* done */
2304 dev_info(dev->dev, "%s\n", driver_desc);
2305 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2306 dev->irq, dev->base_addr, dev->chiprev,
2307 dma_mode_string());
2308 dev_info(dev->dev, "version: %s\n", driver_vers);
2309
2310 the_controller = dev;
2311
2312 ret = device_register(&dev->gadget.dev);
2313 if (ret)
2314 goto err_irq;
2315 ret = device_create_file(dev->dev, &dev_attr_registers);
2316 if (ret)
2317 goto err_dev_reg;
2318
2319 ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
2320 if (ret)
2321 goto err_add_udc;
2322
2323 return 0;
2324
2325err_add_udc:
2326 device_remove_file(dev->dev, &dev_attr_registers);
2327 err_dev_reg:
2328 device_unregister(&dev->gadget.dev);
2329 err_irq:
2330 free_irq(dev->irq, dev);
2331 err:
2332 return ret;
2333}
2334
2335#ifdef CONFIG_PCI
2336
2337/*
2338 * wrap this driver around the specified device, but
2339 * don't respond over USB until a gadget driver binds to us
2340 */
2341
2342static int __devinit
2343net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2344{
2345 unsigned long resource, len, tmp;
2346 void __iomem *mem_mapped_addr[4];
2347 int ret, i;
2348
2349 /*
2350 * BAR 0 holds PLX 9054 config registers
2351 * BAR 1 is i/o memory; unused here
2352 * BAR 2 holds EPLD config registers
2353 * BAR 3 holds NET2272 registers
2354 */
2355
2356 /* Find and map all address spaces */
2357 for (i = 0; i < 4; ++i) {
2358 if (i == 1)
2359 continue; /* BAR1 unused */
2360
2361 resource = pci_resource_start(pdev, i);
2362 len = pci_resource_len(pdev, i);
2363
2364 if (!request_mem_region(resource, len, driver_name)) {
2365 dev_dbg(dev->dev, "controller already in use\n");
2366 ret = -EBUSY;
2367 goto err;
2368 }
2369
2370 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2371 if (mem_mapped_addr[i] == NULL) {
2372 release_mem_region(resource, len);
2373 dev_dbg(dev->dev, "can't map memory\n");
2374 ret = -EFAULT;
2375 goto err;
2376 }
2377 }
2378
2379 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2380 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2381 dev->base_addr = mem_mapped_addr[3];
2382
2383 /* Set PLX 9054 bus width (16 bits) */
2384 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2385 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2386 dev->rdk1.plx9054_base_addr + LBRD1);
2387
2388 /* Enable PLX 9054 Interrupts */
2389 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2390 (1 << PCI_INTERRUPT_ENABLE) |
2391 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2392 dev->rdk1.plx9054_base_addr + INTCSR);
2393
2394 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2395 dev->rdk1.plx9054_base_addr + DMACSR0);
2396
2397 /* reset */
2398 writeb((1 << EPLD_DMA_ENABLE) |
2399 (1 << DMA_CTL_DACK) |
2400 (1 << DMA_TIMEOUT_ENABLE) |
2401 (1 << USER) |
2402 (0 << MPX_MODE) |
2403 (1 << BUSWIDTH) |
2404 (1 << NET2272_RESET),
2405 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2406
2407 mb();
2408 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2409 ~(1 << NET2272_RESET),
2410 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2411 udelay(200);
2412
2413 return 0;
2414
2415 err:
2416 while (--i >= 0) {
2417 iounmap(mem_mapped_addr[i]);
2418 release_mem_region(pci_resource_start(pdev, i),
2419 pci_resource_len(pdev, i));
2420 }
2421
2422 return ret;
2423}
2424
2425static int __devinit
2426net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2427{
2428 unsigned long resource, len;
2429 void __iomem *mem_mapped_addr[2];
2430 int ret, i;
2431
2432 /*
	 * BAR 0 holds FPGA config registers
2434 * BAR 1 holds NET2272 registers
2435 */
2436
2437 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2438 for (i = 0; i < 2; ++i) {
2439 resource = pci_resource_start(pdev, i);
2440 len = pci_resource_len(pdev, i);
2441
2442 if (!request_mem_region(resource, len, driver_name)) {
2443 dev_dbg(dev->dev, "controller already in use\n");
2444 ret = -EBUSY;
2445 goto err;
2446 }
2447
2448 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2449 if (mem_mapped_addr[i] == NULL) {
2450 release_mem_region(resource, len);
2451 dev_dbg(dev->dev, "can't map memory\n");
2452 ret = -EFAULT;
2453 goto err;
2454 }
2455 }
2456
2457 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2458 dev->base_addr = mem_mapped_addr[1];
2459
2460 mb();
2461 /* Set 2272 bus width (16 bits) and reset */
2462 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2463 udelay(200);
2464 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2465 /* Print fpga version number */
2466 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2467 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2468 /* Enable FPGA Interrupts */
2469 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2470
2471 return 0;
2472
2473 err:
2474 while (--i >= 0) {
2475 iounmap(mem_mapped_addr[i]);
2476 release_mem_region(pci_resource_start(pdev, i),
2477 pci_resource_len(pdev, i));
2478 }
2479
2480 return ret;
2481}
2482
2483static int __devinit
2484net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2485{
2486 struct net2272 *dev;
2487 int ret;
2488
2489 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2490 if (IS_ERR(dev))
2491 return PTR_ERR(dev);
2492 dev->dev_id = pdev->device;
2493
2494 if (pci_enable_device(pdev) < 0) {
2495 ret = -ENODEV;
2496 goto err_free;
2497 }
2498
2499 pci_set_master(pdev);
2500
2501 switch (pdev->device) {
2502 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2503 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2504 default: BUG();
2505 }
2506 if (ret)
2507 goto err_pci;
2508
2509 ret = net2272_probe_fin(dev, 0);
2510 if (ret)
2511 goto err_pci;
2512
2513 pci_set_drvdata(pdev, dev);
2514
2515 return 0;
2516
2517 err_pci:
2518 pci_disable_device(pdev);
2519 err_free:
2520 kfree(dev);
2521
2522 return ret;
2523}
2524
2525static void __devexit
2526net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2527{
2528 int i;
2529
2530 /* disable PLX 9054 interrupts */
2531 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2532 ~(1 << PCI_INTERRUPT_ENABLE),
2533 dev->rdk1.plx9054_base_addr + INTCSR);
2534
2535 /* clean up resources allocated during probe() */
2536 iounmap(dev->rdk1.plx9054_base_addr);
2537 iounmap(dev->rdk1.epld_base_addr);
2538
2539 for (i = 0; i < 4; ++i) {
2540 if (i == 1)
2541 continue; /* BAR1 unused */
2542 release_mem_region(pci_resource_start(pdev, i),
2543 pci_resource_len(pdev, i));
2544 }
2545}
2546
2547static void __devexit
2548net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2549{
2550 int i;
2551
	/*
	 * disable fpga interrupts
	 *  - the commented-out code below was copied from the RDK1 path and
	 *    pokes the PLX 9054 INTCSR; the RDK2 FPGA's interrupt enable is
	 *    RDK2_IRQENB instead, so the write is left disabled here.
	 *
	 * writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
	 *		~(1 << PCI_INTERRUPT_ENABLE),
	 *	dev->rdk1.plx9054_base_addr + INTCSR);
	 */
2557
2558 /* clean up resources allocated during probe() */
2559 iounmap(dev->rdk2.fpga_base_addr);
2560
2561 for (i = 0; i < 2; ++i)
2562 release_mem_region(pci_resource_start(pdev, i),
2563 pci_resource_len(pdev, i));
2564}
2565
2566static void __devexit
2567net2272_pci_remove(struct pci_dev *pdev)
2568{
2569 struct net2272 *dev = pci_get_drvdata(pdev);
2570
2571 net2272_remove(dev);
2572
2573 switch (pdev->device) {
2574 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2575 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2576 default: BUG();
2577 }
2578
2579 pci_disable_device(pdev);
2580
2581 kfree(dev);
2582}
2583
2584/* Table of matching PCI IDs */
2585static struct pci_device_id __devinitdata pci_ids[] = {
2586 { /* RDK 1 card */
2587 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2588 .class_mask = 0,
2589 .vendor = PCI_VENDOR_ID_PLX,
2590 .device = PCI_DEVICE_ID_RDK1,
2591 .subvendor = PCI_ANY_ID,
2592 .subdevice = PCI_ANY_ID,
2593 },
2594 { /* RDK 2 card */
2595 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2596 .class_mask = 0,
2597 .vendor = PCI_VENDOR_ID_PLX,
2598 .device = PCI_DEVICE_ID_RDK2,
2599 .subvendor = PCI_ANY_ID,
2600 .subdevice = PCI_ANY_ID,
2601 },
2602 { }
2603};
2604MODULE_DEVICE_TABLE(pci, pci_ids);
2605
2606static struct pci_driver net2272_pci_driver = {
2607 .name = driver_name,
2608 .id_table = pci_ids,
2609
2610 .probe = net2272_pci_probe,
2611 .remove = __devexit_p(net2272_pci_remove),
2612};
2613
2614static int net2272_pci_register(void)
2615{
2616 return pci_register_driver(&net2272_pci_driver);
2617}
2618
2619static void net2272_pci_unregister(void)
2620{
2621 pci_unregister_driver(&net2272_pci_driver);
2622}
2623
2624#else
2625static inline int net2272_pci_register(void) { return 0; }
2626static inline void net2272_pci_unregister(void) { }
2627#endif
2628
2629/*---------------------------------------------------------------------------*/
2630
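/*
 * The platform-bus binding expects board code to describe the controller
 * with an IORESOURCE_MEM region, an IORESOURCE_IRQ entry (optionally
 * carrying edge/level trigger flags), and an optional IORESOURCE_BUS
 * entry giving the local-bus address shift.  A minimal, purely
 * illustrative board-file sketch (base address, IRQ number, and shift
 * value are made up for this example):
 *
 *	static struct resource board_net2272_resources[] = {
 *		{
 *			.start = 0x20300000,
 *			.end   = 0x203000ff,
 *			.flags = IORESOURCE_MEM,
 *		}, {
 *			.start = 64,
 *			.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		}, {
 *			.start = 1,
 *			.flags = IORESOURCE_BUS,
 *		},
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name          = "net2272",
 *		.id            = -1,
 *		.resource      = board_net2272_resources,
 *		.num_resources = ARRAY_SIZE(board_net2272_resources),
 *	};
 */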
2631static int __devinit
2632net2272_plat_probe(struct platform_device *pdev)
2633{
2634 struct net2272 *dev;
2635 int ret;
2636 unsigned int irqflags;
2637 resource_size_t base, len;
2638 struct resource *iomem, *iomem_bus, *irq_res;
2639
2640 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2641 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2642 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2643 if (!irq_res || !iomem) {
		dev_err(&pdev->dev, "must provide irq/base addr\n");
2645 return -EINVAL;
2646 }
2647
2648 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2649 if (IS_ERR(dev))
2650 return PTR_ERR(dev);
2651
2652 irqflags = 0;
2653 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2654 irqflags |= IRQF_TRIGGER_RISING;
2655 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2656 irqflags |= IRQF_TRIGGER_FALLING;
2657 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2658 irqflags |= IRQF_TRIGGER_HIGH;
2659 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2660 irqflags |= IRQF_TRIGGER_LOW;
2661
2662 base = iomem->start;
2663 len = resource_size(iomem);
2664 if (iomem_bus)
2665 dev->base_shift = iomem_bus->start;
2666
2667 if (!request_mem_region(base, len, driver_name)) {
		dev_dbg(dev->dev, "can't request memory region\n");
2669 ret = -EBUSY;
2670 goto err;
2671 }
2672 dev->base_addr = ioremap_nocache(base, len);
2673 if (!dev->base_addr) {
2674 dev_dbg(dev->dev, "can't map memory\n");
2675 ret = -EFAULT;
2676 goto err_req;
2677 }
2678
2679 ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2680 if (ret)
2681 goto err_io;
2682
2683 platform_set_drvdata(pdev, dev);
2684 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2685 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2686
2687 the_controller = dev;
2688
2689 return 0;
2690
2691 err_io:
2692 iounmap(dev->base_addr);
2693 err_req:
2694 release_mem_region(base, len);
2695 err:
2696 return ret;
2697}
2698
2699static int __devexit
2700net2272_plat_remove(struct platform_device *pdev)
2701{
2702 struct net2272 *dev = platform_get_drvdata(pdev);
2703
2704 net2272_remove(dev);
2705
2706 release_mem_region(pdev->resource[0].start,
2707 resource_size(&pdev->resource[0]));
2708
2709 kfree(dev);
2710
2711 return 0;
2712}
2713
2714static struct platform_driver net2272_plat_driver = {
2715 .probe = net2272_plat_probe,
2716 .remove = __devexit_p(net2272_plat_remove),
2717 .driver = {
2718 .name = driver_name,
2719 .owner = THIS_MODULE,
2720 },
2721 /* FIXME .suspend, .resume */
2722};
2723MODULE_ALIAS("platform:net2272");
2724
2725static int __init net2272_init(void)
2726{
2727 int ret;
2728
2729 ret = net2272_pci_register();
2730 if (ret)
2731 return ret;
2732 ret = platform_driver_register(&net2272_plat_driver);
2733 if (ret)
2734 goto err_pci;
2735 return ret;
2736
2737err_pci:
2738 net2272_pci_unregister();
2739 return ret;
2740}
2741module_init(net2272_init);
2742
2743static void __exit net2272_cleanup(void)
2744{
2745 net2272_pci_unregister();
2746 platform_driver_unregister(&net2272_plat_driver);
2747}
2748module_exit(net2272_cleanup);
2749
2750MODULE_DESCRIPTION(DRIVER_DESC);
2751MODULE_AUTHOR("PLX Technology, Inc.");
2752MODULE_LICENSE("GPL");
1/*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/pci.h>
35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/timer.h>
40#include <linux/usb.h>
41#include <linux/usb/ch9.h>
42#include <linux/usb/gadget.h>
43
44#include <asm/byteorder.h>
45#include <asm/unaligned.h>
46
47#include "net2272.h"
48
49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
50
51static const char driver_name[] = "net2272";
52static const char driver_vers[] = "2006 October 17/mainline";
53static const char driver_desc[] = DRIVER_DESC;
54
55static const char ep0name[] = "ep0";
56static const char * const ep_name[] = {
57 ep0name,
58 "ep-a", "ep-b", "ep-c",
59};
60
61#define DMA_ADDR_INVALID (~(dma_addr_t)0)
62#ifdef CONFIG_USB_GADGET_NET2272_DMA
63/*
64 * use_dma: the NET2272 can use an external DMA controller.
65 * Note that since there is no generic DMA api, some functions,
66 * notably request_dma, start_dma, and cancel_dma will need to be
67 * modified for your platform's particular dma controller.
68 *
69 * If use_dma is disabled, pio will be used instead.
70 */
71static bool use_dma = 0;
72module_param(use_dma, bool, 0644);
73
74/*
75 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
76 * The NET2272 can only use dma for a single endpoint at a time.
77 * At some point this could be modified to allow either endpoint
78 * to take control of dma as it becomes available.
79 *
80 * Note that DMA should not be used on OUT endpoints unless it can
81 * be guaranteed that no short packets will arrive on an IN endpoint
82 * while the DMA operation is pending. Otherwise the OUT DMA will
83 * terminate prematurely (See NET2272 Errata 630-0213-0101)
84 */
85static ushort dma_ep = 1;
86module_param(dma_ep, ushort, 0644);
87
88/*
89 * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
90 * mode 0 == Slow DREQ mode
91 * mode 1 == Fast DREQ mode
92 * mode 2 == Burst mode
93 */
94static ushort dma_mode = 2;
95module_param(dma_mode, ushort, 0644);
96#else
97#define use_dma 0
98#define dma_ep 1
99#define dma_mode 2
100#endif
101
102/*
103 * fifo_mode: net2272 buffer configuration:
104 * mode 0 == ep-{a,b,c} 512db each
105 * mode 1 == ep-a 1k, ep-{b,c} 512db
106 * mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
107 * mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
108 */
109static ushort fifo_mode = 0;
110module_param(fifo_mode, ushort, 0644);
111
112/*
113 * enable_suspend: When enabled, the driver will respond to
114 * USB suspend requests by powering down the NET2272. Otherwise,
 * USB suspend requests will be ignored. This is acceptable for
 * self-powered devices. For bus-powered devices, set this to 1.
117 */
118static ushort enable_suspend = 0;
119module_param(enable_suspend, ushort, 0644);
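
/*
 * All of the module parameters above can be given at load time; a purely
 * illustrative invocation would be:
 *
 *	modprobe net2272 fifo_mode=1 enable_suspend=1
 *
 * and, when the driver is built with CONFIG_USB_GADGET_NET2272_DMA:
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2
 *
 * Because the parameters are registered with mode 0644, they can also be
 * inspected and changed at runtime via /sys/module/net2272/parameters/.
 */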
120
121static void assert_out_naking(struct net2272_ep *ep, const char *where)
122{
123 u8 tmp;
124
125#ifndef DEBUG
126 return;
127#endif
128
129 tmp = net2272_ep_read(ep, EP_STAT0);
130 if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
131 dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
132 ep->ep.name, where, tmp);
133 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
134 }
135}
136#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
137
138static void stop_out_naking(struct net2272_ep *ep)
139{
140 u8 tmp = net2272_ep_read(ep, EP_STAT0);
141
142 if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
143 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
144}
145
146#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
147
148static char *type_string(u8 bmAttributes)
149{
150 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
151 case USB_ENDPOINT_XFER_BULK: return "bulk";
152 case USB_ENDPOINT_XFER_ISOC: return "iso";
153 case USB_ENDPOINT_XFER_INT: return "intr";
154 default: return "control";
155 }
156}
157
158static char *buf_state_string(unsigned state)
159{
160 switch (state) {
161 case BUFF_FREE: return "free";
162 case BUFF_VALID: return "valid";
163 case BUFF_LCL: return "local";
164 case BUFF_USB: return "usb";
165 default: return "unknown";
166 }
167}
168
169static char *dma_mode_string(void)
170{
171 if (!use_dma)
172 return "PIO";
173 switch (dma_mode) {
174 case 0: return "SLOW DREQ";
175 case 1: return "FAST DREQ";
176 case 2: return "BURST";
177 default: return "invalid";
178 }
179}
180
181static void net2272_dequeue_all(struct net2272_ep *);
182static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
183static int net2272_fifo_status(struct usb_ep *);
184
185static struct usb_ep_ops net2272_ep_ops;
186
187/*---------------------------------------------------------------------------*/
188
189static int
190net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
191{
192 struct net2272 *dev;
193 struct net2272_ep *ep;
194 u32 max;
195 u8 tmp;
196 unsigned long flags;
197
198 ep = container_of(_ep, struct net2272_ep, ep);
199 if (!_ep || !desc || ep->desc || _ep->name == ep0name
200 || desc->bDescriptorType != USB_DT_ENDPOINT)
201 return -EINVAL;
202 dev = ep->dev;
203 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
204 return -ESHUTDOWN;
205
206 max = usb_endpoint_maxp(desc) & 0x1fff;
207
208 spin_lock_irqsave(&dev->lock, flags);
209 _ep->maxpacket = max & 0x7fff;
210 ep->desc = desc;
211
212 /* net2272_ep_reset() has already been called */
213 ep->stopped = 0;
214 ep->wedged = 0;
215
216 /* set speed-dependent max packet */
217 net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
218 net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
219
220 /* set type, direction, address; reset fifo counters */
221 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
222 tmp = usb_endpoint_type(desc);
223 if (usb_endpoint_xfer_bulk(desc)) {
224 /* catch some particularly blatant driver bugs */
225 if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
226 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
227 spin_unlock_irqrestore(&dev->lock, flags);
228 return -ERANGE;
229 }
230 }
231 ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
232 tmp <<= ENDPOINT_TYPE;
233 tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
234 tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
235 tmp |= (1 << ENDPOINT_ENABLE);
236
237 /* for OUT transfers, block the rx fifo until a read is posted */
238 ep->is_in = usb_endpoint_dir_in(desc);
239 if (!ep->is_in)
240 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
241
242 net2272_ep_write(ep, EP_CFG, tmp);
243
244 /* enable irqs */
245 tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
246 net2272_write(dev, IRQENB0, tmp);
247
248 tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
249 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
250 | net2272_ep_read(ep, EP_IRQENB);
251 net2272_ep_write(ep, EP_IRQENB, tmp);
252
253 tmp = desc->bEndpointAddress;
254 dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
255 _ep->name, tmp & 0x0f, PIPEDIR(tmp),
256 type_string(desc->bmAttributes), max,
257 net2272_ep_read(ep, EP_CFG));
258
259 spin_unlock_irqrestore(&dev->lock, flags);
260 return 0;
261}
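
/*
 * For illustration, this is roughly the kind of descriptor a gadget driver
 * would hand to the enable hook above (a high-speed bulk-IN endpoint;
 * the values shown are only an example):
 *
 *	static struct usb_endpoint_descriptor hs_bulk_in_desc = {
 *		.bLength          = USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType  = USB_DT_ENDPOINT,
 *		.bEndpointAddress = USB_DIR_IN,
 *		.bmAttributes     = USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize   = cpu_to_le16(512),
 *	};
 *
 * net2272_enable() rejects bulk descriptors whose wMaxPacketSize does not
 * match the connection speed (512 at high speed, at most 64 at full speed).
 */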
262
263static void net2272_ep_reset(struct net2272_ep *ep)
264{
265 u8 tmp;
266
267 ep->desc = NULL;
268 INIT_LIST_HEAD(&ep->queue);
269
270 ep->ep.maxpacket = ~0;
271 ep->ep.ops = &net2272_ep_ops;
272
273 /* disable irqs, endpoint */
274 net2272_ep_write(ep, EP_IRQENB, 0);
275
276 /* init to our chosen defaults, notably so that we NAK OUT
277 * packets until the driver queues a read.
278 */
279 tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
280 net2272_ep_write(ep, EP_RSPSET, tmp);
281
282 tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
283 if (ep->num != 0)
284 tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
285
286 net2272_ep_write(ep, EP_RSPCLR, tmp);
287
288 /* scrub most status bits, and flush any fifo state */
289 net2272_ep_write(ep, EP_STAT0,
290 (1 << DATA_IN_TOKEN_INTERRUPT)
291 | (1 << DATA_OUT_TOKEN_INTERRUPT)
292 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
293 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
294 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
295
296 net2272_ep_write(ep, EP_STAT1,
297 (1 << TIMEOUT)
298 | (1 << USB_OUT_ACK_SENT)
299 | (1 << USB_OUT_NAK_SENT)
300 | (1 << USB_IN_ACK_RCVD)
301 | (1 << USB_IN_NAK_SENT)
302 | (1 << USB_STALL_SENT)
303 | (1 << LOCAL_OUT_ZLP)
304 | (1 << BUFFER_FLUSH));
305
	/* fifo size is handled separately */
307}
308
309static int net2272_disable(struct usb_ep *_ep)
310{
311 struct net2272_ep *ep;
312 unsigned long flags;
313
314 ep = container_of(_ep, struct net2272_ep, ep);
315 if (!_ep || !ep->desc || _ep->name == ep0name)
316 return -EINVAL;
317
318 spin_lock_irqsave(&ep->dev->lock, flags);
319 net2272_dequeue_all(ep);
320 net2272_ep_reset(ep);
321
322 dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
323
324 spin_unlock_irqrestore(&ep->dev->lock, flags);
325 return 0;
326}
327
328/*---------------------------------------------------------------------------*/
329
330static struct usb_request *
331net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
332{
333 struct net2272_ep *ep;
334 struct net2272_request *req;
335
336 if (!_ep)
337 return NULL;
338 ep = container_of(_ep, struct net2272_ep, ep);
339
340 req = kzalloc(sizeof(*req), gfp_flags);
341 if (!req)
342 return NULL;
343
344 req->req.dma = DMA_ADDR_INVALID;
345 INIT_LIST_HEAD(&req->queue);
346
347 return &req->req;
348}
349
350static void
351net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
352{
353 struct net2272_ep *ep;
354 struct net2272_request *req;
355
356 ep = container_of(_ep, struct net2272_ep, ep);
357 if (!_ep || !_req)
358 return;
359
360 req = container_of(_req, struct net2272_request, req);
361 WARN_ON(!list_empty(&req->queue));
362 kfree(req);
363}
364
365static void
366net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
367{
368 struct net2272 *dev;
369 unsigned stopped = ep->stopped;
370
371 if (ep->num == 0) {
372 if (ep->dev->protocol_stall) {
373 ep->stopped = 1;
374 set_halt(ep);
375 }
376 allow_status(ep);
377 }
378
379 list_del_init(&req->queue);
380
381 if (req->req.status == -EINPROGRESS)
382 req->req.status = status;
383 else
384 status = req->req.status;
385
386 dev = ep->dev;
387 if (use_dma && ep->dma)
388 usb_gadget_unmap_request(&dev->gadget, &req->req,
389 ep->is_in);
390
391 if (status && status != -ESHUTDOWN)
392 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
393 ep->ep.name, &req->req, status,
394 req->req.actual, req->req.length, req->req.buf);
395
396 /* don't modify queue heads during completion callback */
397 ep->stopped = 1;
398 spin_unlock(&dev->lock);
399 req->req.complete(&ep->ep, &req->req);
400 spin_lock(&dev->lock);
401 ep->stopped = stopped;
402}
403
404static int
405net2272_write_packet(struct net2272_ep *ep, u8 *buf,
406 struct net2272_request *req, unsigned max)
407{
408 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
409 u16 *bufp;
410 unsigned length, count;
411 u8 tmp;
412
413 length = min(req->req.length - req->req.actual, max);
414 req->req.actual += length;
415
416 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
417 ep->ep.name, req, max, length,
418 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
419
420 count = length;
421 bufp = (u16 *)buf;
422
423 while (likely(count >= 2)) {
424 /* no byte-swap required; chip endian set during init */
425 writew(*bufp++, ep_data);
426 count -= 2;
427 }
428 buf = (u8 *)bufp;
429
430 /* write final byte by placing the NET2272 into 8-bit mode */
431 if (unlikely(count)) {
432 tmp = net2272_read(ep->dev, LOCCTL);
433 net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
434 writeb(*buf, ep_data);
435 net2272_write(ep->dev, LOCCTL, tmp);
436 }
437 return length;
438}
439
440/* returns: 0: still running, 1: completed, negative: errno */
441static int
442net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
443{
444 u8 *buf;
445 unsigned count, max;
446 int status;
447
448 dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
449 ep->ep.name, req->req.actual, req->req.length);
450
451 /*
452 * Keep loading the endpoint until the final packet is loaded,
453 * or the endpoint buffer is full.
454 */
455 top:
456 /*
457 * Clear interrupt status
458 * - Packet Transmitted interrupt will become set again when the
459 * host successfully takes another packet
460 */
461 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
462 while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
463 buf = req->req.buf + req->req.actual;
464 prefetch(buf);
465
466 /* force pagesel */
467 net2272_ep_read(ep, EP_STAT0);
468
469 max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
470 (net2272_ep_read(ep, EP_AVAIL0));
471
472 if (max < ep->ep.maxpacket)
473 max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
474 | (net2272_ep_read(ep, EP_AVAIL0));
475
476 count = net2272_write_packet(ep, buf, req, max);
477 /* see if we are done */
478 if (req->req.length == req->req.actual) {
479 /* validate short or zlp packet */
480 if (count < ep->ep.maxpacket)
481 set_fifo_bytecount(ep, 0);
482 net2272_done(ep, req, 0);
483
484 if (!list_empty(&ep->queue)) {
485 req = list_entry(ep->queue.next,
486 struct net2272_request,
487 queue);
488 status = net2272_kick_dma(ep, req);
489
490 if (status < 0)
491 if ((net2272_ep_read(ep, EP_STAT0)
492 & (1 << BUFFER_EMPTY)))
493 goto top;
494 }
495 return 1;
496 }
497 net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
498 }
499 return 0;
500}
501
502static void
503net2272_out_flush(struct net2272_ep *ep)
504{
505 ASSERT_OUT_NAKING(ep);
506
507 net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
508 | (1 << DATA_PACKET_RECEIVED_INTERRUPT));
509 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
510}
511
512static int
513net2272_read_packet(struct net2272_ep *ep, u8 *buf,
514 struct net2272_request *req, unsigned avail)
515{
516 u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
517 unsigned is_short;
518 u16 *bufp;
519
520 req->req.actual += avail;
521
522 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
523 ep->ep.name, req, avail,
524 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
525
526 is_short = (avail < ep->ep.maxpacket);
527
528 if (unlikely(avail == 0)) {
529 /* remove any zlp from the buffer */
530 (void)readw(ep_data);
531 return is_short;
532 }
533
534 /* Ensure we get the final byte */
535 if (unlikely(avail % 2))
536 avail++;
537 bufp = (u16 *)buf;
538
539 do {
540 *bufp++ = readw(ep_data);
541 avail -= 2;
542 } while (avail);
543
544 /*
	 * To avoid a false "endpoint available" race condition, EP_STAT0 must
	 * be read twice in the case of a short transfer
547 */
548 if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
549 net2272_ep_read(ep, EP_STAT0);
550
551 return is_short;
552}
553
554static int
555net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
556{
557 u8 *buf;
558 unsigned is_short;
559 int count;
560 int tmp;
561 int cleanup = 0;
562 int status = -1;
563
564 dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
565 ep->ep.name, req->req.actual, req->req.length);
566
567 top:
568 do {
569 buf = req->req.buf + req->req.actual;
570 prefetchw(buf);
571
572 count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
573 | net2272_ep_read(ep, EP_AVAIL0);
574
575 net2272_ep_write(ep, EP_STAT0,
576 (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
577 (1 << DATA_PACKET_RECEIVED_INTERRUPT));
578
579 tmp = req->req.length - req->req.actual;
580
581 if (count > tmp) {
582 if ((tmp % ep->ep.maxpacket) != 0) {
583 dev_err(ep->dev->dev,
584 "%s out fifo %d bytes, expected %d\n",
585 ep->ep.name, count, tmp);
586 cleanup = 1;
587 }
588 count = (tmp > 0) ? tmp : 0;
589 }
590
591 is_short = net2272_read_packet(ep, buf, req, count);
592
593 /* completion */
594 if (unlikely(cleanup || is_short ||
595 ((req->req.actual == req->req.length)
596 && !req->req.zero))) {
597
598 if (cleanup) {
599 net2272_out_flush(ep);
600 net2272_done(ep, req, -EOVERFLOW);
601 } else
602 net2272_done(ep, req, 0);
603
604 /* re-initialize endpoint transfer registers
605 * otherwise they may result in erroneous pre-validation
606 * for subsequent control reads
607 */
608 if (unlikely(ep->num == 0)) {
609 net2272_ep_write(ep, EP_TRANSFER2, 0);
610 net2272_ep_write(ep, EP_TRANSFER1, 0);
611 net2272_ep_write(ep, EP_TRANSFER0, 0);
612 }
613
614 if (!list_empty(&ep->queue)) {
615 req = list_entry(ep->queue.next,
616 struct net2272_request, queue);
617 status = net2272_kick_dma(ep, req);
618 if ((status < 0) &&
619 !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
620 goto top;
621 }
622 return 1;
623 }
624 } while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
625
626 return 0;
627}
628
629static void
630net2272_pio_advance(struct net2272_ep *ep)
631{
632 struct net2272_request *req;
633
634 if (unlikely(list_empty(&ep->queue)))
635 return;
636
637 req = list_entry(ep->queue.next, struct net2272_request, queue);
638 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
639}
640
641/* returns 0 on success, else negative errno */
642static int
643net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
644 unsigned len, unsigned dir)
645{
646 dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
647 ep, buf, len, dir);
648
649 /* The NET2272 only supports a single dma channel */
650 if (dev->dma_busy)
651 return -EBUSY;
652 /*
653 * EP_TRANSFER (used to determine the number of bytes received
654 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
655 */
656 if ((dir == 1) && (len > 0x1000000))
657 return -EINVAL;
658
659 dev->dma_busy = 1;
660
661 /* initialize platform's dma */
662#ifdef CONFIG_PCI
663 /* NET2272 addr, buffer addr, length, etc. */
664 switch (dev->dev_id) {
665 case PCI_DEVICE_ID_RDK1:
666 /* Setup PLX 9054 DMA mode */
667 writel((1 << LOCAL_BUS_WIDTH) |
668 (1 << TA_READY_INPUT_ENABLE) |
669 (0 << LOCAL_BURST_ENABLE) |
670 (1 << DONE_INTERRUPT_ENABLE) |
671 (1 << LOCAL_ADDRESSING_MODE) |
672 (1 << DEMAND_MODE) |
673 (1 << DMA_EOT_ENABLE) |
674 (1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
675 (1 << DMA_CHANNEL_INTERRUPT_SELECT),
676 dev->rdk1.plx9054_base_addr + DMAMODE0);
677
678 writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
679 writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
680 writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
681 writel((dir << DIRECTION_OF_TRANSFER) |
682 (1 << INTERRUPT_AFTER_TERMINAL_COUNT),
683 dev->rdk1.plx9054_base_addr + DMADPR0);
684 writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
685 readl(dev->rdk1.plx9054_base_addr + INTCSR),
686 dev->rdk1.plx9054_base_addr + INTCSR);
687
688 break;
689 }
690#endif
691
692 net2272_write(dev, DMAREQ,
693 (0 << DMA_BUFFER_VALID) |
694 (1 << DMA_REQUEST_ENABLE) |
695 (1 << DMA_CONTROL_DACK) |
696 (dev->dma_eot_polarity << EOT_POLARITY) |
697 (dev->dma_dack_polarity << DACK_POLARITY) |
698 (dev->dma_dreq_polarity << DREQ_POLARITY) |
699 ((ep >> 1) << DMA_ENDPOINT_SELECT));
700
701 (void) net2272_read(dev, SCRATCH);
702
703 return 0;
704}
705
706static void
707net2272_start_dma(struct net2272 *dev)
708{
709 /* start platform's dma controller */
710#ifdef CONFIG_PCI
711 switch (dev->dev_id) {
712 case PCI_DEVICE_ID_RDK1:
713 writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
714 dev->rdk1.plx9054_base_addr + DMACSR0);
715 break;
716 }
717#endif
718}
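
/*
 * Only the RDK1's PLX 9054 DMA engine is programmed above; as noted near
 * the use_dma parameter, a non-PCI platform has to plug its own DMA
 * controller into net2272_request_dma()/net2272_start_dma().  A purely
 * hypothetical sketch (the dma_set_transfer()/dma_start_channel() helpers
 * and the channel bookkeeping do not exist in this driver):
 *
 *	static void net2272_start_dma(struct net2272 *dev)
 *	{
 *		dma_set_transfer(my_channel, my_buf, my_len / 2);
 *		dma_start_channel(my_channel);
 *	}
 */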
719
720/* returns 0 on success, else negative errno */
721static int
722net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
723{
724 unsigned size;
725 u8 tmp;
726
727 if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
728 return -EINVAL;
729
730 /* don't use dma for odd-length transfers
731 * otherwise, we'd need to deal with the last byte with pio
732 */
733 if (req->req.length & 1)
734 return -EINVAL;
735
736 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
737 ep->ep.name, req, (unsigned long long) req->req.dma);
738
739 net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
740
741 /* The NET2272 can only use DMA on one endpoint at a time */
742 if (ep->dev->dma_busy)
743 return -EBUSY;
744
745 /* Make sure we only DMA an even number of bytes (we'll use
746 * pio to complete the transfer)
747 */
748 size = req->req.length;
749 size &= ~1;
750
751 /* device-to-host transfer */
752 if (ep->is_in) {
753 /* initialize platform's dma controller */
754 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
755 /* unable to obtain DMA channel; return error and use pio mode */
756 return -EBUSY;
757 req->req.actual += size;
758
759 /* host-to-device transfer */
760 } else {
761 tmp = net2272_ep_read(ep, EP_STAT0);
762
763 /* initialize platform's dma controller */
764 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
765 /* unable to obtain DMA channel; return error and use pio mode */
766 return -EBUSY;
767
768 if (!(tmp & (1 << BUFFER_EMPTY)))
769 ep->not_empty = 1;
770 else
771 ep->not_empty = 0;
772
773
774 /* allow the endpoint's buffer to fill */
775 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
776
777 /* this transfer completed and data's already in the fifo
778 * return error so pio gets used.
779 */
780 if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
781
782 /* deassert dreq */
783 net2272_write(ep->dev, DMAREQ,
784 (0 << DMA_BUFFER_VALID) |
785 (0 << DMA_REQUEST_ENABLE) |
786 (1 << DMA_CONTROL_DACK) |
787 (ep->dev->dma_eot_polarity << EOT_POLARITY) |
788 (ep->dev->dma_dack_polarity << DACK_POLARITY) |
789 (ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
790 ((ep->num >> 1) << DMA_ENDPOINT_SELECT));
791
792 return -EBUSY;
793 }
794 }
795
796 /* Don't use per-packet interrupts: use dma interrupts only */
797 net2272_ep_write(ep, EP_IRQENB, 0);
798
799 net2272_start_dma(ep->dev);
800
801 return 0;
802}
803
804static void net2272_cancel_dma(struct net2272 *dev)
805{
806#ifdef CONFIG_PCI
807 switch (dev->dev_id) {
808 case PCI_DEVICE_ID_RDK1:
809 writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
810 writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
811 while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
812 (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */
814
815 /* dma abort generates an interrupt */
816 writeb(1 << CHANNEL_CLEAR_INTERRUPT,
817 dev->rdk1.plx9054_base_addr + DMACSR0);
818 break;
819 }
820#endif
821
822 dev->dma_busy = 0;
823}
824
825/*---------------------------------------------------------------------------*/
826
827static int
828net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
829{
830 struct net2272_request *req;
831 struct net2272_ep *ep;
832 struct net2272 *dev;
833 unsigned long flags;
834 int status = -1;
835 u8 s;
836
837 req = container_of(_req, struct net2272_request, req);
838 if (!_req || !_req->complete || !_req->buf
839 || !list_empty(&req->queue))
840 return -EINVAL;
841 ep = container_of(_ep, struct net2272_ep, ep);
842 if (!_ep || (!ep->desc && ep->num != 0))
843 return -EINVAL;
844 dev = ep->dev;
845 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
846 return -ESHUTDOWN;
847
848 /* set up dma mapping in case the caller didn't */
849 if (use_dma && ep->dma) {
850 status = usb_gadget_map_request(&dev->gadget, _req,
851 ep->is_in);
852 if (status)
853 return status;
854 }
855
856 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
857 _ep->name, _req, _req->length, _req->buf,
858 (unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
859
860 spin_lock_irqsave(&dev->lock, flags);
861
862 _req->status = -EINPROGRESS;
863 _req->actual = 0;
864
865 /* kickstart this i/o queue? */
866 if (list_empty(&ep->queue) && !ep->stopped) {
867 /* maybe there's no control data, just status ack */
868 if (ep->num == 0 && _req->length == 0) {
869 net2272_done(ep, req, 0);
870 dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
871 goto done;
872 }
873
874 /* Return zlp, don't let it block subsequent packets */
875 s = net2272_ep_read(ep, EP_STAT0);
876 if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty; check for a blocking zlp and handle it */
878 if ((s & (1 << NAK_OUT_PACKETS)) &&
879 net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
880 dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
881 /*
882 * Request is going to terminate with a short packet ...
883 * hope the client is ready for it!
884 */
885 status = net2272_read_fifo(ep, req);
886 /* clear short packet naking */
887 net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
888 goto done;
889 }
890 }
891
892 /* try dma first */
893 status = net2272_kick_dma(ep, req);
894
895 if (status < 0) {
896 /* dma failed (most likely in use by another endpoint)
897 * fallback to pio
898 */
899 status = 0;
900
901 if (ep->is_in)
902 status = net2272_write_fifo(ep, req);
903 else {
904 s = net2272_ep_read(ep, EP_STAT0);
905 if ((s & (1 << BUFFER_EMPTY)) == 0)
906 status = net2272_read_fifo(ep, req);
907 }
908
909 if (unlikely(status != 0)) {
910 if (status > 0)
911 status = 0;
912 req = NULL;
913 }
914 }
915 }
	if (likely(req))
917 list_add_tail(&req->queue, &ep->queue);
918
919 if (likely(!list_empty(&ep->queue)))
920 net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
921 done:
922 spin_unlock_irqrestore(&dev->lock, flags);
923
924 return 0;
925}
926
927/* dequeue ALL requests */
928static void
929net2272_dequeue_all(struct net2272_ep *ep)
930{
931 struct net2272_request *req;
932
933 /* called with spinlock held */
934 ep->stopped = 1;
935
936 while (!list_empty(&ep->queue)) {
937 req = list_entry(ep->queue.next,
938 struct net2272_request,
939 queue);
940 net2272_done(ep, req, -ESHUTDOWN);
941 }
942}
943
944/* dequeue JUST ONE request */
945static int
946net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
947{
948 struct net2272_ep *ep;
949 struct net2272_request *req;
950 unsigned long flags;
951 int stopped;
952
953 ep = container_of(_ep, struct net2272_ep, ep);
954 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
955 return -EINVAL;
956
957 spin_lock_irqsave(&ep->dev->lock, flags);
958 stopped = ep->stopped;
959 ep->stopped = 1;
960
961 /* make sure it's still queued on this endpoint */
962 list_for_each_entry(req, &ep->queue, queue) {
963 if (&req->req == _req)
964 break;
965 }
966 if (&req->req != _req) {
967 spin_unlock_irqrestore(&ep->dev->lock, flags);
968 return -EINVAL;
969 }
970
971 /* queue head may be partially complete */
972 if (ep->queue.next == &req->queue) {
973 dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
974 net2272_done(ep, req, -ECONNRESET);
975 }
976 req = NULL;
977 ep->stopped = stopped;
978
979 spin_unlock_irqrestore(&ep->dev->lock, flags);
980 return 0;
981}
982
983/*---------------------------------------------------------------------------*/
984
985static int
986net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
987{
988 struct net2272_ep *ep;
989 unsigned long flags;
990 int ret = 0;
991
992 ep = container_of(_ep, struct net2272_ep, ep);
993 if (!_ep || (!ep->desc && ep->num != 0))
994 return -EINVAL;
995 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
996 return -ESHUTDOWN;
997 if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
998 return -EINVAL;
999
1000 spin_lock_irqsave(&ep->dev->lock, flags);
1001 if (!list_empty(&ep->queue))
1002 ret = -EAGAIN;
1003 else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1004 ret = -EAGAIN;
1005 else {
1006 dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1007 value ? "set" : "clear",
1008 wedged ? "wedge" : "halt");
1009 /* set/clear */
1010 if (value) {
1011 if (ep->num == 0)
1012 ep->dev->protocol_stall = 1;
1013 else
1014 set_halt(ep);
1015 if (wedged)
1016 ep->wedged = 1;
1017 } else {
1018 clear_halt(ep);
1019 ep->wedged = 0;
1020 }
1021 }
1022 spin_unlock_irqrestore(&ep->dev->lock, flags);
1023
1024 return ret;
1025}
1026
1027static int
1028net2272_set_halt(struct usb_ep *_ep, int value)
1029{
1030 return net2272_set_halt_and_wedge(_ep, value, 0);
1031}
1032
1033static int
1034net2272_set_wedge(struct usb_ep *_ep)
1035{
1036 if (!_ep || _ep->name == ep0name)
1037 return -EINVAL;
1038 return net2272_set_halt_and_wedge(_ep, 1, 1);
1039}
1040
1041static int
1042net2272_fifo_status(struct usb_ep *_ep)
1043{
1044 struct net2272_ep *ep;
1045 u16 avail;
1046
1047 ep = container_of(_ep, struct net2272_ep, ep);
1048 if (!_ep || (!ep->desc && ep->num != 0))
1049 return -ENODEV;
1050 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1051 return -ESHUTDOWN;
1052
1053 avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1054 avail |= net2272_ep_read(ep, EP_AVAIL0);
1055 if (avail > ep->fifo_size)
1056 return -EOVERFLOW;
1057 if (ep->is_in)
1058 avail = ep->fifo_size - avail;
1059 return avail;
1060}
1061
1062static void
1063net2272_fifo_flush(struct usb_ep *_ep)
1064{
1065 struct net2272_ep *ep;
1066
1067 ep = container_of(_ep, struct net2272_ep, ep);
1068 if (!_ep || (!ep->desc && ep->num != 0))
1069 return;
1070 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1071 return;
1072
1073 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1074}
1075
1076static struct usb_ep_ops net2272_ep_ops = {
1077 .enable = net2272_enable,
1078 .disable = net2272_disable,
1079
1080 .alloc_request = net2272_alloc_request,
1081 .free_request = net2272_free_request,
1082
1083 .queue = net2272_queue,
1084 .dequeue = net2272_dequeue,
1085
1086 .set_halt = net2272_set_halt,
1087 .set_wedge = net2272_set_wedge,
1088 .fifo_status = net2272_fifo_status,
1089 .fifo_flush = net2272_fifo_flush,
1090};
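
/*
 * Gadget drivers never call these operations directly; they go through the
 * usb_ep_*() wrappers declared in <linux/usb/gadget.h>.  A typical (purely
 * illustrative) submission path from a function driver looks like:
 *
 *	struct usb_request *req;
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf      = buf;
 *	req->length   = len;
 *	req->complete = my_complete;	 (hypothetical completion callback)
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * which lands in net2272_alloc_request() and net2272_queue() above.
 */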
1091
1092/*---------------------------------------------------------------------------*/
1093
1094static int
1095net2272_get_frame(struct usb_gadget *_gadget)
1096{
1097 struct net2272 *dev;
1098 unsigned long flags;
1099 u16 ret;
1100
1101 if (!_gadget)
1102 return -ENODEV;
1103 dev = container_of(_gadget, struct net2272, gadget);
1104 spin_lock_irqsave(&dev->lock, flags);
1105
1106 ret = net2272_read(dev, FRAME1) << 8;
1107 ret |= net2272_read(dev, FRAME0);
1108
1109 spin_unlock_irqrestore(&dev->lock, flags);
1110 return ret;
1111}
1112
1113static int
1114net2272_wakeup(struct usb_gadget *_gadget)
1115{
1116 struct net2272 *dev;
1117 u8 tmp;
1118 unsigned long flags;
1119
1120 if (!_gadget)
1121 return 0;
1122 dev = container_of(_gadget, struct net2272, gadget);
1123
1124 spin_lock_irqsave(&dev->lock, flags);
1125 tmp = net2272_read(dev, USBCTL0);
1126 if (tmp & (1 << IO_WAKEUP_ENABLE))
1127 net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1128
1129 spin_unlock_irqrestore(&dev->lock, flags);
1130
1131 return 0;
1132}
1133
1134static int
1135net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1136{
1137 struct net2272 *dev;
1138
1139 if (!_gadget)
1140 return -ENODEV;
1141 dev = container_of(_gadget, struct net2272, gadget);
1142
1143 dev->is_selfpowered = value;
1144
1145 return 0;
1146}
1147
1148static int
1149net2272_pullup(struct usb_gadget *_gadget, int is_on)
1150{
1151 struct net2272 *dev;
1152 u8 tmp;
1153 unsigned long flags;
1154
1155 if (!_gadget)
1156 return -ENODEV;
1157 dev = container_of(_gadget, struct net2272, gadget);
1158
1159 spin_lock_irqsave(&dev->lock, flags);
1160 tmp = net2272_read(dev, USBCTL0);
1161 dev->softconnect = (is_on != 0);
1162 if (is_on)
1163 tmp |= (1 << USB_DETECT_ENABLE);
1164 else
1165 tmp &= ~(1 << USB_DETECT_ENABLE);
1166 net2272_write(dev, USBCTL0, tmp);
1167 spin_unlock_irqrestore(&dev->lock, flags);
1168
1169 return 0;
1170}
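
/*
 * The pullup hook is normally reached through usb_gadget_connect() and
 * usb_gadget_disconnect(); for example (illustrative only):
 *
 *	usb_gadget_disconnect(&dev->gadget);	 drops USB_DETECT_ENABLE
 *	usb_gadget_connect(&dev->gadget);	 re-asserts USB_DETECT_ENABLE
 */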
1171
1172static int net2272_start(struct usb_gadget *_gadget,
1173 struct usb_gadget_driver *driver);
1174static int net2272_stop(struct usb_gadget *_gadget,
1175 struct usb_gadget_driver *driver);
1176
1177static const struct usb_gadget_ops net2272_ops = {
1178 .get_frame = net2272_get_frame,
1179 .wakeup = net2272_wakeup,
1180 .set_selfpowered = net2272_set_selfpowered,
1181 .pullup = net2272_pullup,
1182 .udc_start = net2272_start,
1183 .udc_stop = net2272_stop,
1184};
1185
1186/*---------------------------------------------------------------------------*/
1187
1188static ssize_t
1189net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
1190{
1191 struct net2272 *dev;
1192 char *next;
1193 unsigned size, t;
1194 unsigned long flags;
1195 u8 t1, t2;
1196 int i;
1197 const char *s;
1198
1199 dev = dev_get_drvdata(_dev);
1200 next = buf;
1201 size = PAGE_SIZE;
1202 spin_lock_irqsave(&dev->lock, flags);
1203
1204 if (dev->driver)
1205 s = dev->driver->driver.name;
1206 else
1207 s = "(none)";
1208
1209 /* Main Control Registers */
	t = scnprintf(next, size, "%s version %s, "
1211 "chiprev %02x, locctl %02x\n"
1212 "irqenb0 %02x irqenb1 %02x "
1213 "irqstat0 %02x irqstat1 %02x\n",
1214 driver_name, driver_vers, dev->chiprev,
1215 net2272_read(dev, LOCCTL),
1216 net2272_read(dev, IRQENB0),
1217 net2272_read(dev, IRQENB1),
1218 net2272_read(dev, IRQSTAT0),
1219 net2272_read(dev, IRQSTAT1));
1220 size -= t;
1221 next += t;
1222
1223 /* DMA */
1224 t1 = net2272_read(dev, DMAREQ);
1225 t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1226 t1, ep_name[(t1 & 0x01) + 1],
1227 t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1228 t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1229 t1 & (1 << DMA_REQUEST) ? "req " : "",
1230 t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1231 size -= t;
1232 next += t;
1233
1234 /* USB Control Registers */
1235 t1 = net2272_read(dev, USBCTL1);
1236 if (t1 & (1 << VBUS_PIN)) {
1237 if (t1 & (1 << USB_HIGH_SPEED))
1238 s = "high speed";
1239 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1240 s = "powered";
1241 else
1242 s = "full speed";
1243 } else
1244 s = "not attached";
1245 t = scnprintf(next, size,
1246 "usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1247 net2272_read(dev, USBCTL0), t1,
1248 net2272_read(dev, OURADDR), s);
1249 size -= t;
1250 next += t;
1251
1252 /* Endpoint Registers */
1253 for (i = 0; i < 4; ++i) {
1254 struct net2272_ep *ep;
1255
1256 ep = &dev->ep[i];
1257 if (i && !ep->desc)
1258 continue;
1259
1260 t1 = net2272_ep_read(ep, EP_CFG);
1261 t2 = net2272_ep_read(ep, EP_RSPSET);
1262 t = scnprintf(next, size,
1263 "\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1264 "irqenb %02x\n",
1265 ep->ep.name, t1, t2,
1266 (t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1267 (t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1268 (t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1269 (t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1270 (t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1271 (t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1272 (t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1273 (t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1274 net2272_ep_read(ep, EP_IRQENB));
1275 size -= t;
1276 next += t;
1277
1278 t = scnprintf(next, size,
1279 "\tstat0 %02x stat1 %02x avail %04x "
1280 "(ep%d%s-%s)%s\n",
1281 net2272_ep_read(ep, EP_STAT0),
1282 net2272_ep_read(ep, EP_STAT1),
1283 (net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1284 t1 & 0x0f,
1285 ep->is_in ? "in" : "out",
1286 type_string(t1 >> 5),
1287 ep->stopped ? "*" : "");
1288 size -= t;
1289 next += t;
1290
1291 t = scnprintf(next, size,
1292 "\tep_transfer %06x\n",
1293 ((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1294 ((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1295 ((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1296 size -= t;
1297 next += t;
1298
1299 t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1300 t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1301 t = scnprintf(next, size,
1302 "\tbuf-a %s buf-b %s\n",
1303 buf_state_string(t1),
1304 buf_state_string(t2));
1305 size -= t;
1306 next += t;
1307 }
1308
1309 spin_unlock_irqrestore(&dev->lock, flags);
1310
1311 return PAGE_SIZE - size;
1312}
1313static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
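/*
 * The dump above is exported as a read-only sysfs attribute named
 * "registers" (created in net2272_probe_fin()).  On a platform-bus
 * binding the path would look roughly like the following; the exact
 * path depends on how the device is bound:
 *
 *	cat /sys/devices/platform/net2272/registers
 */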
1314
1315/*---------------------------------------------------------------------------*/
1316
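/* Select one of the four LOCCTL[7:6] fifo configurations and rebuild the
 * gadget's ep_list to match.  Note that in mode 3, ep-b is not added to
 * the list at all, so gadget drivers cannot claim it.
 */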
1317static void
1318net2272_set_fifo_mode(struct net2272 *dev, int mode)
1319{
1320 u8 tmp;
1321
1322 tmp = net2272_read(dev, LOCCTL) & 0x3f;
1323 tmp |= (mode << 6);
1324 net2272_write(dev, LOCCTL, tmp);
1325
1326 INIT_LIST_HEAD(&dev->gadget.ep_list);
1327
1328 /* always ep-a, ep-c ... maybe not ep-b */
1329 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1330
1331 switch (mode) {
1332 case 0:
1333 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1334 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1335 break;
1336 case 1:
1337 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1338 dev->ep[1].fifo_size = 1024;
1339 dev->ep[2].fifo_size = 512;
1340 break;
1341 case 2:
1342 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1343 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1344 break;
1345 case 3:
1346 dev->ep[1].fifo_size = 1024;
1347 break;
1348 }
1349
1350 /* ep-c is always 2 512 byte buffers */
1351 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1352 dev->ep[3].fifo_size = 512;
1353}
1354
1355/*---------------------------------------------------------------------------*/
1356
1357static void
1358net2272_usb_reset(struct net2272 *dev)
1359{
1360 dev->gadget.speed = USB_SPEED_UNKNOWN;
1361
1362 net2272_cancel_dma(dev);
1363
1364 net2272_write(dev, IRQENB0, 0);
1365 net2272_write(dev, IRQENB1, 0);
1366
1367 /* clear irq state */
1368 net2272_write(dev, IRQSTAT0, 0xff);
1369 net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1370
1371 net2272_write(dev, DMAREQ,
1372 (0 << DMA_BUFFER_VALID) |
1373 (0 << DMA_REQUEST_ENABLE) |
1374 (1 << DMA_CONTROL_DACK) |
1375 (dev->dma_eot_polarity << EOT_POLARITY) |
1376 (dev->dma_dack_polarity << DACK_POLARITY) |
1377 (dev->dma_dreq_polarity << DREQ_POLARITY) |
1378 ((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1379
1380 net2272_cancel_dma(dev);
1381 net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1382
1383	/* Set the NET2272 ep fifo data width to 16-bit mode.
1384	 * Note that the higher level gadget drivers are expected to convert data to little endian;
1385	 * enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1386	 */
1387 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1388 net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1389}
1390
1391static void
1392net2272_usb_reinit(struct net2272 *dev)
1393{
1394 int i;
1395
1396 /* basic endpoint init */
1397 for (i = 0; i < 4; ++i) {
1398 struct net2272_ep *ep = &dev->ep[i];
1399
1400 ep->ep.name = ep_name[i];
1401 ep->dev = dev;
1402 ep->num = i;
1403 ep->not_empty = 0;
1404
1405 if (use_dma && ep->num == dma_ep)
1406 ep->dma = 1;
1407
1408 if (i > 0 && i <= 3)
1409 ep->fifo_size = 512;
1410 else
1411 ep->fifo_size = 64;
1412 net2272_ep_reset(ep);
1413 }
1414 dev->ep[0].ep.maxpacket = 64;
1415
1416 dev->gadget.ep0 = &dev->ep[0].ep;
1417 dev->ep[0].stopped = 0;
1418 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1419}
1420
1421static void
1422net2272_ep0_start(struct net2272 *dev)
1423{
1424 struct net2272_ep *ep0 = &dev->ep[0];
1425
1426 net2272_ep_write(ep0, EP_RSPSET,
1427 (1 << NAK_OUT_PACKETS_MODE) |
1428 (1 << ALT_NAK_OUT_PACKETS));
1429 net2272_ep_write(ep0, EP_RSPCLR,
1430 (1 << HIDE_STATUS_PHASE) |
1431 (1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1432 net2272_write(dev, USBCTL0,
1433 (dev->softconnect << USB_DETECT_ENABLE) |
1434 (1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1435 (1 << IO_WAKEUP_ENABLE));
1436 net2272_write(dev, IRQENB0,
1437 (1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1438 (1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1439 (1 << DMA_DONE_INTERRUPT_ENABLE));
1440 net2272_write(dev, IRQENB1,
1441 (1 << VBUS_INTERRUPT_ENABLE) |
1442 (1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1443 (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1444}
1445
1446/* when a driver is successfully registered, it will receive
1447 * control requests including set_configuration(), which enables
1448 * non-control requests. then usb traffic follows until a
1449 * disconnect is reported. then a host may connect again, or
1450 * the driver might get unbound.
1451 */
1452static int net2272_start(struct usb_gadget *_gadget,
1453 struct usb_gadget_driver *driver)
1454{
1455 struct net2272 *dev;
1456 unsigned i;
1457
1458 if (!driver || !driver->unbind || !driver->setup ||
1459 driver->max_speed != USB_SPEED_HIGH)
1460 return -EINVAL;
1461
1462 dev = container_of(_gadget, struct net2272, gadget);
1463
1464 for (i = 0; i < 4; ++i)
1465 dev->ep[i].irqs = 0;
1466 /* hook up the driver ... */
1467 dev->softconnect = 1;
1468 driver->driver.bus = NULL;
1469 dev->driver = driver;
1470 dev->gadget.dev.driver = &driver->driver;
1471
1472 /* ... then enable host detection and ep0; and we're ready
1473 * for set_configuration as well as eventual disconnect.
1474 */
1475 net2272_ep0_start(dev);
1476
1477 dev_dbg(dev->dev, "%s ready\n", driver->driver.name);
1478
1479 return 0;
1480}
1481
1482static void
1483stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1484{
1485 int i;
1486
1487 /* don't disconnect if it's not connected */
1488 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1489 driver = NULL;
1490
1491 /* stop hardware; prevent new request submissions;
1492 * and kill any outstanding requests.
1493 */
1494 net2272_usb_reset(dev);
1495 for (i = 0; i < 4; ++i)
1496 net2272_dequeue_all(&dev->ep[i]);
1497
1498 net2272_usb_reinit(dev);
1499}
1500
1501static int net2272_stop(struct usb_gadget *_gadget,
1502 struct usb_gadget_driver *driver)
1503{
1504 struct net2272 *dev;
1505 unsigned long flags;
1506
1507 dev = container_of(_gadget, struct net2272, gadget);
1508
1509 spin_lock_irqsave(&dev->lock, flags);
1510 stop_activity(dev, driver);
1511 spin_unlock_irqrestore(&dev->lock, flags);
1512
1513 dev->gadget.dev.driver = NULL;
1514 dev->driver = NULL;
1515
1516 dev_dbg(dev->dev, "unregistered driver '%s'\n", driver->driver.name);
1517 return 0;
1518}
1519
1520/*---------------------------------------------------------------------------*/
1521/* handle ep-a/ep-b dma completions */
1522static void
1523net2272_handle_dma(struct net2272_ep *ep)
1524{
1525 struct net2272_request *req;
1526 unsigned len;
1527 int status;
1528
1529 if (!list_empty(&ep->queue))
1530 req = list_entry(ep->queue.next,
1531 struct net2272_request, queue);
1532 else
1533 req = NULL;
1534
1535 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1536
1537 /* Ensure DREQ is de-asserted */
1538 net2272_write(ep->dev, DMAREQ,
1539 (0 << DMA_BUFFER_VALID)
1540 | (0 << DMA_REQUEST_ENABLE)
1541 | (1 << DMA_CONTROL_DACK)
1542 | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1543 | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1544 | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1545 | ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));
1546
1547 ep->dev->dma_busy = 0;
1548
1549 net2272_ep_write(ep, EP_IRQENB,
1550 (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1551 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1552 | net2272_ep_read(ep, EP_IRQENB));
1553
1554 /* device-to-host transfer completed */
1555 if (ep->is_in) {
1556 /* validate a short packet or zlp if necessary */
1557 if ((req->req.length % ep->ep.maxpacket != 0) ||
1558 req->req.zero)
1559 set_fifo_bytecount(ep, 0);
1560
1561 net2272_done(ep, req, 0);
1562 if (!list_empty(&ep->queue)) {
1563 req = list_entry(ep->queue.next,
1564 struct net2272_request, queue);
1565 status = net2272_kick_dma(ep, req);
1566 if (status < 0)
1567 net2272_pio_advance(ep);
1568 }
1569
1570 /* host-to-device transfer completed */
1571 } else {
1572 /* terminated with a short packet? */
1573 if (net2272_read(ep->dev, IRQSTAT0) &
1574 (1 << DMA_DONE_INTERRUPT)) {
1575 /* abort system dma */
1576 net2272_cancel_dma(ep->dev);
1577 }
1578
1579 /* EP_TRANSFER will contain the number of bytes
1580 * actually received.
1581 * NOTE: There is no overflow detection on EP_TRANSFER:
1582 * We can't deal with transfers larger than 2^24 bytes!
1583 */
1584 len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1585 | (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1586 | (net2272_ep_read(ep, EP_TRANSFER0));
1587
1588 if (ep->not_empty)
1589 len += 4;
1590
1591 req->req.actual += len;
1592
1593 /* get any remaining data */
1594 net2272_pio_advance(ep);
1595 }
1596}
1597
1598/*---------------------------------------------------------------------------*/
1599
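/* Per-endpoint (non-control) interrupt service: acknowledge the EP_STAT
 * bits and, if a data packet was received or transmitted, let the PIO
 * engine advance the request at the head of the queue.
 */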
1600static void
1601net2272_handle_ep(struct net2272_ep *ep)
1602{
1603 struct net2272_request *req;
1604 u8 stat0, stat1;
1605
1606 if (!list_empty(&ep->queue))
1607 req = list_entry(ep->queue.next,
1608 struct net2272_request, queue);
1609 else
1610 req = NULL;
1611
1612 /* ack all, and handle what we care about */
1613 stat0 = net2272_ep_read(ep, EP_STAT0);
1614 stat1 = net2272_ep_read(ep, EP_STAT1);
1615 ep->irqs++;
1616
1617 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1618		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1619
1620 net2272_ep_write(ep, EP_STAT0, stat0 &
1621 ~((1 << NAK_OUT_PACKETS)
1622 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1623 net2272_ep_write(ep, EP_STAT1, stat1);
1624
1625 /* data packet(s) received (in the fifo, OUT)
1626 * direction must be validated, otherwise control read status phase
1627 * could be interpreted as a valid packet
1628 */
1629 if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1630 net2272_pio_advance(ep);
1631 /* data packet(s) transmitted (IN) */
1632 else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1633 net2272_pio_advance(ep);
1634}
1635
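/* Map a wIndex value from a SETUP packet (direction bit plus endpoint
 * number) to the matching enabled endpoint, or NULL if there is none.
 */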
1636static struct net2272_ep *
1637net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1638{
1639 struct net2272_ep *ep;
1640
1641 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1642 return &dev->ep[0];
1643
1644 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1645 u8 bEndpointAddress;
1646
1647 if (!ep->desc)
1648 continue;
1649 bEndpointAddress = ep->desc->bEndpointAddress;
1650 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1651 continue;
1652 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1653 return ep;
1654 }
1655 return NULL;
1656}
1657
1658/*
1659 * USB Test Packet:
1660 * JKJKJKJK * 9
1661 * JJKKJJKK * 8
1662 * JJJJKKKK * 8
1663 * JJJJJJJKKKKKKK * 8
1664 * JJJJJJJK * 8
1665 * {JKKKKKKK * 10}, JK
1666 */
1667static const u8 net2272_test_packet[] = {
1668 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1669 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1670 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1671 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1672 0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1673 0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1674};
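/* The bytes above are the standard USB 2.0 Test Packet payload; they are
 * loaded into the FIFO byte-by-byte by net2272_set_test_mode() when the
 * host selects TEST_PACKET via SET_FEATURE(TEST_MODE).
 */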
1675
1676static void
1677net2272_set_test_mode(struct net2272 *dev, int mode)
1678{
1679 int i;
1680
1681 /* Disable all net2272 interrupts:
1682 * Nothing but a power cycle should stop the test.
1683 */
1684 net2272_write(dev, IRQENB0, 0x00);
1685 net2272_write(dev, IRQENB1, 0x00);
1686
1687	/* Force transceiver to high-speed */
1688 net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1689
1690 net2272_write(dev, PAGESEL, 0);
1691 net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1692 net2272_write(dev, EP_RSPCLR,
1693 (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1694 | (1 << HIDE_STATUS_PHASE));
1695 net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1696 net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1697
1698 /* wait for status phase to complete */
1699 while (!(net2272_read(dev, EP_STAT0) &
1700 (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1701 ;
1702
1703 /* Enable test mode */
1704 net2272_write(dev, USBTEST, mode);
1705
1706 /* load test packet */
1707 if (mode == TEST_PACKET) {
1708 /* switch to 8 bit mode */
1709 net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1710 ~(1 << DATA_WIDTH));
1711
1712 for (i = 0; i < sizeof(net2272_test_packet); ++i)
1713 net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1714
1715 /* Validate test packet */
1716 net2272_write(dev, EP_TRANSFER0, 0);
1717 }
1718}
1719
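/* Service IRQSTAT0 sources: incoming SETUP packets (ep0), external DMA
 * completion, and the per-endpoint data interrupts.
 */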
1720static void
1721net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1722{
1723 struct net2272_ep *ep;
1724 u8 num, scratch;
1725
1726 /* starting a control request? */
1727 if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1728 union {
1729 u8 raw[8];
1730 struct usb_ctrlrequest r;
1731 } u;
1732 int tmp = 0;
1733 struct net2272_request *req;
1734
1735 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1736 if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1737 dev->gadget.speed = USB_SPEED_HIGH;
1738 else
1739 dev->gadget.speed = USB_SPEED_FULL;
1740 dev_dbg(dev->dev, "%s\n",
1741 usb_speed_string(dev->gadget.speed));
1742 }
1743
1744 ep = &dev->ep[0];
1745 ep->irqs++;
1746
1747 /* make sure any leftover interrupt state is cleared */
1748 stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1749 while (!list_empty(&ep->queue)) {
1750 req = list_entry(ep->queue.next,
1751 struct net2272_request, queue);
1752 net2272_done(ep, req,
1753 (req->req.actual == req->req.length) ? 0 : -EPROTO);
1754 }
1755 ep->stopped = 0;
1756 dev->protocol_stall = 0;
1757 net2272_ep_write(ep, EP_STAT0,
1758 (1 << DATA_IN_TOKEN_INTERRUPT)
1759 | (1 << DATA_OUT_TOKEN_INTERRUPT)
1760 | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1761 | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1762 | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1763 net2272_ep_write(ep, EP_STAT1,
1764 (1 << TIMEOUT)
1765 | (1 << USB_OUT_ACK_SENT)
1766 | (1 << USB_OUT_NAK_SENT)
1767 | (1 << USB_IN_ACK_RCVD)
1768 | (1 << USB_IN_NAK_SENT)
1769 | (1 << USB_STALL_SENT)
1770 | (1 << LOCAL_OUT_ZLP));
1771
1772 /*
1773 * Ensure Control Read pre-validation setting is beyond maximum size
1774 * - Control Writes can leave non-zero values in EP_TRANSFER. If
1775 * an EP0 transfer following the Control Write is a Control Read,
1776 * the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1777 * pre-validation count.
1778 * - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1779		 * the pre-validation count cannot cause an unexpected validation
1780 */
1781 net2272_write(dev, PAGESEL, 0);
1782 net2272_write(dev, EP_TRANSFER2, 0xff);
1783 net2272_write(dev, EP_TRANSFER1, 0xff);
1784 net2272_write(dev, EP_TRANSFER0, 0xff);
1785
1786 u.raw[0] = net2272_read(dev, SETUP0);
1787 u.raw[1] = net2272_read(dev, SETUP1);
1788 u.raw[2] = net2272_read(dev, SETUP2);
1789 u.raw[3] = net2272_read(dev, SETUP3);
1790 u.raw[4] = net2272_read(dev, SETUP4);
1791 u.raw[5] = net2272_read(dev, SETUP5);
1792 u.raw[6] = net2272_read(dev, SETUP6);
1793 u.raw[7] = net2272_read(dev, SETUP7);
1794 /*
1795 * If you have a big endian cpu make sure le16_to_cpus
1796 * performs the proper byte swapping here...
1797 */
1798 le16_to_cpus(&u.r.wValue);
1799 le16_to_cpus(&u.r.wIndex);
1800 le16_to_cpus(&u.r.wLength);
1801
1802 /* ack the irq */
1803 net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1804 stat ^= (1 << SETUP_PACKET_INTERRUPT);
1805
1806 /* watch control traffic at the token level, and force
1807 * synchronization before letting the status phase happen.
1808 */
1809 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1810 if (ep->is_in) {
1811 scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1812 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814 stop_out_naking(ep);
1815 } else
1816 scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1817 | (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1818 | (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1819 net2272_ep_write(ep, EP_IRQENB, scratch);
1820
1821 if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1822 goto delegate;
1823 switch (u.r.bRequest) {
1824 case USB_REQ_GET_STATUS: {
1825 struct net2272_ep *e;
1826 u16 status = 0;
1827
1828 switch (u.r.bRequestType & USB_RECIP_MASK) {
1829 case USB_RECIP_ENDPOINT:
1830 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1831 if (!e || u.r.wLength > 2)
1832 goto do_stall;
1833 if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1834 status = __constant_cpu_to_le16(1);
1835 else
1836 status = __constant_cpu_to_le16(0);
1837
1838 /* don't bother with a request object! */
1839 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1840 writew(status, net2272_reg_addr(dev, EP_DATA));
1841 set_fifo_bytecount(&dev->ep[0], 0);
1842 allow_status(ep);
1843 dev_vdbg(dev->dev, "%s stat %02x\n",
1844 ep->ep.name, status);
1845 goto next_endpoints;
1846 case USB_RECIP_DEVICE:
1847 if (u.r.wLength > 2)
1848 goto do_stall;
1849 if (dev->is_selfpowered)
1850 status = (1 << USB_DEVICE_SELF_POWERED);
1851
1852 /* don't bother with a request object! */
1853 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1854 writew(status, net2272_reg_addr(dev, EP_DATA));
1855 set_fifo_bytecount(&dev->ep[0], 0);
1856 allow_status(ep);
1857 dev_vdbg(dev->dev, "device stat %02x\n", status);
1858 goto next_endpoints;
1859 case USB_RECIP_INTERFACE:
1860 if (u.r.wLength > 2)
1861 goto do_stall;
1862
1863 /* don't bother with a request object! */
1864 net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1865 writew(status, net2272_reg_addr(dev, EP_DATA));
1866 set_fifo_bytecount(&dev->ep[0], 0);
1867 allow_status(ep);
1868 dev_vdbg(dev->dev, "interface status %02x\n", status);
1869 goto next_endpoints;
1870 }
1871
1872 break;
1873 }
1874 case USB_REQ_CLEAR_FEATURE: {
1875 struct net2272_ep *e;
1876
1877 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1878 goto delegate;
1879 if (u.r.wValue != USB_ENDPOINT_HALT ||
1880 u.r.wLength != 0)
1881 goto do_stall;
1882 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1883 if (!e)
1884 goto do_stall;
1885 if (e->wedged) {
1886 dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1887 ep->ep.name);
1888 } else {
1889 dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1890 clear_halt(e);
1891 }
1892 allow_status(ep);
1893 goto next_endpoints;
1894 }
1895 case USB_REQ_SET_FEATURE: {
1896 struct net2272_ep *e;
1897
1898 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1899 if (u.r.wIndex != NORMAL_OPERATION)
1900 net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1901 allow_status(ep);
1902 dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1903 goto next_endpoints;
1904 } else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1905 goto delegate;
1906 if (u.r.wValue != USB_ENDPOINT_HALT ||
1907 u.r.wLength != 0)
1908 goto do_stall;
1909 e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1910 if (!e)
1911 goto do_stall;
1912 set_halt(e);
1913 allow_status(ep);
1914 dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1915 goto next_endpoints;
1916 }
1917 case USB_REQ_SET_ADDRESS: {
1918 net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1919 allow_status(ep);
1920 break;
1921 }
1922 default:
1923 delegate:
1924 dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1925 "ep_cfg %08x\n",
1926 u.r.bRequestType, u.r.bRequest,
1927 u.r.wValue, u.r.wIndex,
1928 net2272_ep_read(ep, EP_CFG));
1929 spin_unlock(&dev->lock);
1930 tmp = dev->driver->setup(&dev->gadget, &u.r);
1931 spin_lock(&dev->lock);
1932 }
1933
1934 /* stall ep0 on error */
1935 if (tmp < 0) {
1936 do_stall:
1937 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1938 u.r.bRequestType, u.r.bRequest, tmp);
1939 dev->protocol_stall = 1;
1940 }
1941 /* endpoint dma irq? */
1942 } else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1943 net2272_cancel_dma(dev);
1944 net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1945 stat &= ~(1 << DMA_DONE_INTERRUPT);
1946 num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1947 ? 2 : 1;
1948
1949 ep = &dev->ep[num];
1950 net2272_handle_dma(ep);
1951 }
1952
1953 next_endpoints:
1954 /* endpoint data irq? */
1955 scratch = stat & 0x0f;
1956 stat &= ~0x0f;
1957 for (num = 0; scratch; num++) {
1958 u8 t;
1959
1960 /* does this endpoint's FIFO and queue need tending? */
1961 t = 1 << num;
1962 if ((scratch & t) == 0)
1963 continue;
1964 scratch ^= t;
1965
1966 ep = &dev->ep[num];
1967 net2272_handle_ep(ep);
1968 }
1969
1970 /* some interrupts we can just ignore */
1971 stat &= ~(1 << SOF_INTERRUPT);
1972
1973 if (stat)
1974 dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1975}
1976
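/* Service IRQSTAT1 sources: VBUS / root-port-reset changes (connect and
 * disconnect) and suspend/resume notifications.
 */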
1977static void
1978net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1979{
1980 u8 tmp, mask;
1981
1982 /* after disconnect there's nothing else to do! */
1983 tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1984 mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1985
1986 if (stat & tmp) {
1987 net2272_write(dev, IRQSTAT1, tmp);
1988 if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
1989 ((net2272_read(dev, USBCTL1) & mask) == 0))
1990 || ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
1991 == 0))
1992 && (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
1993 dev_dbg(dev->dev, "disconnect %s\n",
1994 dev->driver->driver.name);
1995 stop_activity(dev, dev->driver);
1996 net2272_ep0_start(dev);
1997 return;
1998 }
1999 stat &= ~tmp;
2000
2001 if (!stat)
2002 return;
2003 }
2004
2005 tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2006 if (stat & tmp) {
2007 net2272_write(dev, IRQSTAT1, tmp);
2008 if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2009 if (dev->driver->suspend)
2010 dev->driver->suspend(&dev->gadget);
2011 if (!enable_suspend) {
2012 stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2013 dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2014 }
2015 } else {
2016 if (dev->driver->resume)
2017 dev->driver->resume(&dev->gadget);
2018 }
2019 stat &= ~tmp;
2020 }
2021
2022 /* clear any other status/irqs */
2023 if (stat)
2024 net2272_write(dev, IRQSTAT1, stat);
2025
2026 /* some status we can just ignore */
2027 stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2028 | (1 << SUSPEND_REQUEST_INTERRUPT)
2029 | (1 << RESUME_INTERRUPT));
2030 if (!stat)
2031 return;
2032 else
2033 dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2034}
2035
2036static irqreturn_t net2272_irq(int irq, void *_dev)
2037{
2038 struct net2272 *dev = _dev;
2039#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2040 u32 intcsr;
2041#endif
2042#if defined(PLX_PCI_RDK)
2043 u8 dmareq;
2044#endif
2045 spin_lock(&dev->lock);
2046#if defined(PLX_PCI_RDK)
2047 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2048
2049 if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2050 writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2051 dev->rdk1.plx9054_base_addr + INTCSR);
2052 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2053 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2054 intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2055 writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2056 dev->rdk1.plx9054_base_addr + INTCSR);
2057 }
2058 if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2059 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2060 dev->rdk1.plx9054_base_addr + DMACSR0);
2061
2062 dmareq = net2272_read(dev, DMAREQ);
2063 if (dmareq & 0x01)
2064 net2272_handle_dma(&dev->ep[2]);
2065 else
2066 net2272_handle_dma(&dev->ep[1]);
2067 }
2068#endif
2069#if defined(PLX_PCI_RDK2)
2070 /* see if PCI int for us by checking irqstat */
2071 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2072	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
2073		return IRQ_NONE;
	}
2074 /* check dma interrupts */
2075#endif
2076	/* Platform/device interrupt handler */
2077#if !defined(PLX_PCI_RDK)
2078 net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2079 net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2080#endif
2081 spin_unlock(&dev->lock);
2082
2083 return IRQ_HANDLED;
2084}
2085
2086static int net2272_present(struct net2272 *dev)
2087{
2088 /*
2089	 * Quick test to see if the CPU can communicate properly with the NET2272.
2090 * Verifies connection using writes and reads to write/read and
2091 * read-only registers.
2092 *
2093	 * This routine is strongly recommended, especially during early bring-up
2094	 * of new hardware; however, for designs that do not apply Power On System
2095	 * Tests (POST) it may be discarded (or perhaps minimized).
2096 */
2097 unsigned int ii;
2098 u8 val, refval;
2099
2100 /* Verify NET2272 write/read SCRATCH register can write and read */
2101 refval = net2272_read(dev, SCRATCH);
2102 for (ii = 0; ii < 0x100; ii += 7) {
2103 net2272_write(dev, SCRATCH, ii);
2104 val = net2272_read(dev, SCRATCH);
2105 if (val != ii) {
2106 dev_dbg(dev->dev,
2107 "%s: write/read SCRATCH register test failed: "
2108 "wrote:0x%2.2x, read:0x%2.2x\n",
2109 __func__, ii, val);
2110 return -EINVAL;
2111 }
2112 }
2113 /* To be nice, we write the original SCRATCH value back: */
2114 net2272_write(dev, SCRATCH, refval);
2115
2116 /* Verify NET2272 CHIPREV register is read-only: */
2117 refval = net2272_read(dev, CHIPREV_2272);
2118 for (ii = 0; ii < 0x100; ii += 7) {
2119 net2272_write(dev, CHIPREV_2272, ii);
2120 val = net2272_read(dev, CHIPREV_2272);
2121 if (val != refval) {
2122 dev_dbg(dev->dev,
2123 "%s: write/read CHIPREV register test failed: "
2124 "wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2125 __func__, ii, val, refval);
2126 return -EINVAL;
2127 }
2128 }
2129
2130 /*
2131 * Verify NET2272's "NET2270 legacy revision" register
2132 * - NET2272 has two revision registers. The NET2270 legacy revision
2133 * register should read the same value, regardless of the NET2272
2134 * silicon revision. The legacy register applies to NET2270
2135 * firmware being applied to the NET2272.
2136 */
2137 val = net2272_read(dev, CHIPREV_LEGACY);
2138 if (val != NET2270_LEGACY_REV) {
2139 /*
2140 * Unexpected legacy revision value
2141 * - Perhaps the chip is a NET2270?
2142 */
2143 dev_dbg(dev->dev,
2144 "%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2145 " - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2146 __func__, NET2270_LEGACY_REV, val);
2147 return -EINVAL;
2148 }
2149
2150 /*
2151 * Verify NET2272 silicon revision
2152 * - This revision register is appropriate for the silicon version
2153 * of the NET2272
2154 */
2155 val = net2272_read(dev, CHIPREV_2272);
2156 switch (val) {
2157 case CHIPREV_NET2272_R1:
2158 /*
2159 * NET2272 Rev 1 has DMA related errata:
2160 * - Newer silicon (Rev 1A or better) required
2161 */
2162 dev_dbg(dev->dev,
2163 "%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2164 __func__);
2165 break;
2166 case CHIPREV_NET2272_R1A:
2167 break;
2168 default:
2169 /* NET2272 silicon version *may* not work with this firmware */
2170 dev_dbg(dev->dev,
2171 "%s: unexpected silicon revision register value: "
2172 " CHIPREV_2272: 0x%2.2x\n",
2173 __func__, val);
2174 /*
2175 * Return Success, even though the chip rev is not an expected value
2176 * - Older, pre-built firmware can attempt to operate on newer silicon
2177 * - Often, new silicon is perfectly compatible
2178 */
2179 }
2180
2181 /* Success: NET2272 checks out OK */
2182 return 0;
2183}
2184
2185static void
2186net2272_gadget_release(struct device *_dev)
2187{
2188 struct net2272 *dev = dev_get_drvdata(_dev);
2189 kfree(dev);
2190}
2191
2192/*---------------------------------------------------------------------------*/
2193
2194static void __devexit
2195net2272_remove(struct net2272 *dev)
2196{
2197 usb_del_gadget_udc(&dev->gadget);
2198
2199 /* start with the driver above us */
2200 if (dev->driver) {
2201 /* should have been done already by driver model core */
2202 dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
2203 dev->driver->driver.name);
2204 usb_gadget_unregister_driver(dev->driver);
2205 }
2206
2207 free_irq(dev->irq, dev);
2208 iounmap(dev->base_addr);
2209
2210 device_unregister(&dev->gadget.dev);
2211 device_remove_file(dev->dev, &dev_attr_registers);
2212
2213 dev_info(dev->dev, "unbind\n");
2214}
2215
2216static struct net2272 * __devinit
2217net2272_probe_init(struct device *dev, unsigned int irq)
2218{
2219 struct net2272 *ret;
2220
2221 if (!irq) {
2222 dev_dbg(dev, "No IRQ!\n");
2223 return ERR_PTR(-ENODEV);
2224 }
2225
2226 /* alloc, and start init */
2227 ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2228 if (!ret)
2229 return ERR_PTR(-ENOMEM);
2230
2231 spin_lock_init(&ret->lock);
2232 ret->irq = irq;
2233 ret->dev = dev;
2234 ret->gadget.ops = &net2272_ops;
2235 ret->gadget.max_speed = USB_SPEED_HIGH;
2236
2237 /* the "gadget" abstracts/virtualizes the controller */
2238 dev_set_name(&ret->gadget.dev, "gadget");
2239 ret->gadget.dev.parent = dev;
2240 ret->gadget.dev.dma_mask = dev->dma_mask;
2241 ret->gadget.dev.release = net2272_gadget_release;
2242 ret->gadget.name = driver_name;
2243
2244 return ret;
2245}
2246
2247static int __devinit
2248net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2249{
2250 int ret;
2251
2252	/* See if the controller is really there... */
2253 if (net2272_present(dev)) {
2254 dev_warn(dev->dev, "2272 not found!\n");
2255 ret = -ENODEV;
2256 goto err;
2257 }
2258
2259 net2272_usb_reset(dev);
2260 net2272_usb_reinit(dev);
2261
2262 ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2263 if (ret) {
2264 dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2265 goto err;
2266 }
2267
2268 dev->chiprev = net2272_read(dev, CHIPREV_2272);
2269
2270 /* done */
2271 dev_info(dev->dev, "%s\n", driver_desc);
2272 dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2273 dev->irq, dev->base_addr, dev->chiprev,
2274 dma_mode_string());
2275 dev_info(dev->dev, "version: %s\n", driver_vers);
2276
2277 ret = device_register(&dev->gadget.dev);
2278 if (ret)
2279 goto err_irq;
2280 ret = device_create_file(dev->dev, &dev_attr_registers);
2281 if (ret)
2282 goto err_dev_reg;
2283
2284 ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
2285 if (ret)
2286 goto err_add_udc;
2287
2288 return 0;
2289
2290err_add_udc:
2291 device_remove_file(dev->dev, &dev_attr_registers);
2292 err_dev_reg:
2293 device_unregister(&dev->gadget.dev);
2294 err_irq:
2295 free_irq(dev->irq, dev);
2296 err:
2297 return ret;
2298}
2299
2300#ifdef CONFIG_PCI
2301
2302/*
2303 * wrap this driver around the specified device, but
2304 * don't respond over USB until a gadget driver binds to us
2305 */
2306
2307static int __devinit
2308net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2309{
2310 unsigned long resource, len, tmp;
2311 void __iomem *mem_mapped_addr[4];
2312 int ret, i;
2313
2314 /*
2315 * BAR 0 holds PLX 9054 config registers
2316 * BAR 1 is i/o memory; unused here
2317 * BAR 2 holds EPLD config registers
2318 * BAR 3 holds NET2272 registers
2319 */
2320
2321 /* Find and map all address spaces */
2322 for (i = 0; i < 4; ++i) {
2323 if (i == 1)
2324 continue; /* BAR1 unused */
2325
2326 resource = pci_resource_start(pdev, i);
2327 len = pci_resource_len(pdev, i);
2328
2329 if (!request_mem_region(resource, len, driver_name)) {
2330 dev_dbg(dev->dev, "controller already in use\n");
2331 ret = -EBUSY;
2332 goto err;
2333 }
2334
2335 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2336 if (mem_mapped_addr[i] == NULL) {
2337 release_mem_region(resource, len);
2338 dev_dbg(dev->dev, "can't map memory\n");
2339 ret = -EFAULT;
2340 goto err;
2341 }
2342 }
2343
2344 dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2345 dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2346 dev->base_addr = mem_mapped_addr[3];
2347
2348 /* Set PLX 9054 bus width (16 bits) */
2349 tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2350 writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2351 dev->rdk1.plx9054_base_addr + LBRD1);
2352
2353 /* Enable PLX 9054 Interrupts */
2354 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2355 (1 << PCI_INTERRUPT_ENABLE) |
2356 (1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2357 dev->rdk1.plx9054_base_addr + INTCSR);
2358
2359 writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2360 dev->rdk1.plx9054_base_addr + DMACSR0);
2361
2362 /* reset */
2363 writeb((1 << EPLD_DMA_ENABLE) |
2364 (1 << DMA_CTL_DACK) |
2365 (1 << DMA_TIMEOUT_ENABLE) |
2366 (1 << USER) |
2367 (0 << MPX_MODE) |
2368 (1 << BUSWIDTH) |
2369 (1 << NET2272_RESET),
2370 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2371
2372 mb();
2373 writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2374 ~(1 << NET2272_RESET),
2375 dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2376 udelay(200);
2377
2378 return 0;
2379
2380 err:
2381 while (--i >= 0) {
2382 iounmap(mem_mapped_addr[i]);
2383 release_mem_region(pci_resource_start(pdev, i),
2384 pci_resource_len(pdev, i));
2385 }
2386
2387 return ret;
2388}
2389
2390static int __devinit
2391net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2392{
2393 unsigned long resource, len;
2394 void __iomem *mem_mapped_addr[2];
2395 int ret, i;
2396
2397 /*
2398	 * BAR 0 holds FPGA config registers
2399 * BAR 1 holds NET2272 registers
2400 */
2401
2402 /* Find and map all address spaces, bar2-3 unused in rdk 2 */
2403 for (i = 0; i < 2; ++i) {
2404 resource = pci_resource_start(pdev, i);
2405 len = pci_resource_len(pdev, i);
2406
2407 if (!request_mem_region(resource, len, driver_name)) {
2408 dev_dbg(dev->dev, "controller already in use\n");
2409 ret = -EBUSY;
2410 goto err;
2411 }
2412
2413 mem_mapped_addr[i] = ioremap_nocache(resource, len);
2414 if (mem_mapped_addr[i] == NULL) {
2415 release_mem_region(resource, len);
2416 dev_dbg(dev->dev, "can't map memory\n");
2417 ret = -EFAULT;
2418 goto err;
2419 }
2420 }
2421
2422 dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2423 dev->base_addr = mem_mapped_addr[1];
2424
2425 mb();
2426 /* Set 2272 bus width (16 bits) and reset */
2427 writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2428 udelay(200);
2429 writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2430 /* Print fpga version number */
2431 dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2432 readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2433 /* Enable FPGA Interrupts */
2434 writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2435
2436 return 0;
2437
2438 err:
2439 while (--i >= 0) {
2440 iounmap(mem_mapped_addr[i]);
2441 release_mem_region(pci_resource_start(pdev, i),
2442 pci_resource_len(pdev, i));
2443 }
2444
2445 return ret;
2446}
2447
2448static int __devinit
2449net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2450{
2451 struct net2272 *dev;
2452 int ret;
2453
2454 dev = net2272_probe_init(&pdev->dev, pdev->irq);
2455 if (IS_ERR(dev))
2456 return PTR_ERR(dev);
2457 dev->dev_id = pdev->device;
2458
2459 if (pci_enable_device(pdev) < 0) {
2460 ret = -ENODEV;
2461 goto err_free;
2462 }
2463
2464 pci_set_master(pdev);
2465
2466 switch (pdev->device) {
2467 case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2468 case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2469 default: BUG();
2470 }
2471 if (ret)
2472 goto err_pci;
2473
2474 ret = net2272_probe_fin(dev, 0);
2475 if (ret)
2476 goto err_pci;
2477
2478 pci_set_drvdata(pdev, dev);
2479
2480 return 0;
2481
2482 err_pci:
2483 pci_disable_device(pdev);
2484 err_free:
2485 kfree(dev);
2486
2487 return ret;
2488}
2489
2490static void __devexit
2491net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2492{
2493 int i;
2494
2495 /* disable PLX 9054 interrupts */
2496 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2497 ~(1 << PCI_INTERRUPT_ENABLE),
2498 dev->rdk1.plx9054_base_addr + INTCSR);
2499
2500 /* clean up resources allocated during probe() */
2501 iounmap(dev->rdk1.plx9054_base_addr);
2502 iounmap(dev->rdk1.epld_base_addr);
2503
2504 for (i = 0; i < 4; ++i) {
2505 if (i == 1)
2506 continue; /* BAR1 unused */
2507 release_mem_region(pci_resource_start(pdev, i),
2508 pci_resource_len(pdev, i));
2509 }
2510}
2511
2512static void __devexit
2513net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2514{
2515 int i;
2516
2517 /* disable fpga interrupts
2518 writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2519 ~(1 << PCI_INTERRUPT_ENABLE),
2520 dev->rdk1.plx9054_base_addr + INTCSR);
2521 */
2522
2523 /* clean up resources allocated during probe() */
2524 iounmap(dev->rdk2.fpga_base_addr);
2525
2526 for (i = 0; i < 2; ++i)
2527 release_mem_region(pci_resource_start(pdev, i),
2528 pci_resource_len(pdev, i));
2529}
2530
2531static void __devexit
2532net2272_pci_remove(struct pci_dev *pdev)
2533{
2534 struct net2272 *dev = pci_get_drvdata(pdev);
2535
2536 net2272_remove(dev);
2537
2538 switch (pdev->device) {
2539 case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2540 case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2541 default: BUG();
2542 }
2543
2544 pci_disable_device(pdev);
2545
2546 kfree(dev);
2547}
2548
2549/* Table of matching PCI IDs */
2550static struct pci_device_id __devinitdata pci_ids[] = {
2551 { /* RDK 1 card */
2552 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2553 .class_mask = 0,
2554 .vendor = PCI_VENDOR_ID_PLX,
2555 .device = PCI_DEVICE_ID_RDK1,
2556 .subvendor = PCI_ANY_ID,
2557 .subdevice = PCI_ANY_ID,
2558 },
2559 { /* RDK 2 card */
2560 .class = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2561 .class_mask = 0,
2562 .vendor = PCI_VENDOR_ID_PLX,
2563 .device = PCI_DEVICE_ID_RDK2,
2564 .subvendor = PCI_ANY_ID,
2565 .subdevice = PCI_ANY_ID,
2566 },
2567 { }
2568};
2569MODULE_DEVICE_TABLE(pci, pci_ids);
2570
2571static struct pci_driver net2272_pci_driver = {
2572 .name = driver_name,
2573 .id_table = pci_ids,
2574
2575 .probe = net2272_pci_probe,
2576 .remove = __devexit_p(net2272_pci_remove),
2577};
2578
2579static int net2272_pci_register(void)
2580{
2581 return pci_register_driver(&net2272_pci_driver);
2582}
2583
2584static void net2272_pci_unregister(void)
2585{
2586 pci_unregister_driver(&net2272_pci_driver);
2587}
2588
2589#else
2590static inline int net2272_pci_register(void) { return 0; }
2591static inline void net2272_pci_unregister(void) { }
2592#endif
2593
2594/*---------------------------------------------------------------------------*/
2595
2596static int __devinit
2597net2272_plat_probe(struct platform_device *pdev)
2598{
2599 struct net2272 *dev;
2600 int ret;
2601 unsigned int irqflags;
2602 resource_size_t base, len;
2603 struct resource *iomem, *iomem_bus, *irq_res;
2604
2605 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2606 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2607 iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2608 if (!irq_res || !iomem) {
2609		dev_err(&pdev->dev, "must provide irq/base addr\n");
2610 return -EINVAL;
2611 }
2612
2613 dev = net2272_probe_init(&pdev->dev, irq_res->start);
2614 if (IS_ERR(dev))
2615 return PTR_ERR(dev);
2616
2617 irqflags = 0;
2618 if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2619 irqflags |= IRQF_TRIGGER_RISING;
2620 if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2621 irqflags |= IRQF_TRIGGER_FALLING;
2622 if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2623 irqflags |= IRQF_TRIGGER_HIGH;
2624 if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2625 irqflags |= IRQF_TRIGGER_LOW;
2626
2627 base = iomem->start;
2628 len = resource_size(iomem);
2629 if (iomem_bus)
2630 dev->base_shift = iomem_bus->start;
2631
2632 if (!request_mem_region(base, len, driver_name)) {
2633		dev_dbg(dev->dev, "can't request memory region\n");
2634 ret = -EBUSY;
2635 goto err;
2636 }
2637 dev->base_addr = ioremap_nocache(base, len);
2638 if (!dev->base_addr) {
2639 dev_dbg(dev->dev, "can't map memory\n");
2640 ret = -EFAULT;
2641 goto err_req;
2642 }
2643
2644 ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2645 if (ret)
2646 goto err_io;
2647
2648 platform_set_drvdata(pdev, dev);
2649 dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2650 (net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2651
2652 return 0;
2653
2654 err_io:
2655 iounmap(dev->base_addr);
2656 err_req:
2657 release_mem_region(base, len);
2658 err:
2659 return ret;
2660}
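/*
 * A minimal board-file sketch of the resources this probe expects; all
 * names, addresses and the IRQ line below are illustrative only:
 *
 *	static struct resource board_net2272_resources[] = {
 *		[0] = {			// NET2272 register window
 *			.start	= 0x20300000,
 *			.end	= 0x203000ff,
 *			.flags	= IORESOURCE_MEM,
 *		},
 *		[1] = {			// optional: address-bus shift (base_shift)
 *			.start	= 1,
 *			.flags	= IORESOURCE_BUS,
 *		},
 *		[2] = {
 *			.start	= BOARD_NET2272_IRQ,	// board-specific line
 *			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		},
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name		= "net2272",
 *		.id		= -1,
 *		.resource	= board_net2272_resources,
 *		.num_resources	= ARRAY_SIZE(board_net2272_resources),
 *	};
 */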
2661
2662static int __devexit
2663net2272_plat_remove(struct platform_device *pdev)
2664{
2665 struct net2272 *dev = platform_get_drvdata(pdev);
2666
2667 net2272_remove(dev);
2668
2669 release_mem_region(pdev->resource[0].start,
2670 resource_size(&pdev->resource[0]));
2671
2672 kfree(dev);
2673
2674 return 0;
2675}
2676
2677static struct platform_driver net2272_plat_driver = {
2678 .probe = net2272_plat_probe,
2679 .remove = __devexit_p(net2272_plat_remove),
2680 .driver = {
2681 .name = driver_name,
2682 .owner = THIS_MODULE,
2683 },
2684 /* FIXME .suspend, .resume */
2685};
2686MODULE_ALIAS("platform:net2272");
2687
2688static int __init net2272_init(void)
2689{
2690 int ret;
2691
2692 ret = net2272_pci_register();
2693 if (ret)
2694 return ret;
2695 ret = platform_driver_register(&net2272_plat_driver);
2696 if (ret)
2697 goto err_pci;
2698 return ret;
2699
2700err_pci:
2701 net2272_pci_unregister();
2702 return ret;
2703}
2704module_init(net2272_init);
2705
2706static void __exit net2272_cleanup(void)
2707{
2708 net2272_pci_unregister();
2709 platform_driver_unregister(&net2272_plat_driver);
2710}
2711module_exit(net2272_cleanup);
2712
2713MODULE_DESCRIPTION(DRIVER_DESC);
2714MODULE_AUTHOR("PLX Technology, Inc.");
2715MODULE_LICENSE("GPL");