   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Driver for PLX NET2272 USB device controller
   4 *
   5 * Copyright (C) 2005-2006 PLX Technology, Inc.
   6 * Copyright (C) 2006-2011 Analog Devices, Inc.
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/device.h>
  11#include <linux/errno.h>
  12#include <linux/init.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/ioport.h>
  16#include <linux/kernel.h>
  17#include <linux/list.h>
  18#include <linux/module.h>
  19#include <linux/moduleparam.h>
  20#include <linux/pci.h>
  21#include <linux/platform_device.h>
  22#include <linux/prefetch.h>
  23#include <linux/sched.h>
  24#include <linux/slab.h>
  25#include <linux/timer.h>
  26#include <linux/usb.h>
  27#include <linux/usb/ch9.h>
  28#include <linux/usb/gadget.h>
  29
  30#include <asm/byteorder.h>
  31#include <asm/unaligned.h>
  32
  33#include "net2272.h"
  34
  35#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
  36
  37static const char driver_name[] = "net2272";
  38static const char driver_vers[] = "2006 October 17/mainline";
  39static const char driver_desc[] = DRIVER_DESC;
  40
  41static const char ep0name[] = "ep0";
  42static const char * const ep_name[] = {
  43	ep0name,
  44	"ep-a", "ep-b", "ep-c",
  45};
  46
  47#ifdef CONFIG_USB_NET2272_DMA
  48/*
  49 * use_dma: the NET2272 can use an external DMA controller.
  50 * Note that since there is no generic DMA API, some functions,
  51 * notably request_dma, start_dma, and cancel_dma, will need to be
  52 * modified for your platform's particular dma controller.
  53 *
  54 * If use_dma is disabled, pio will be used instead.
  55 */
  56static bool use_dma = false;
  57module_param(use_dma, bool, 0644);
  58
  59/*
  60 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
  61 * The NET2272 can only use dma for a single endpoint at a time.
  62 * At some point this could be modified to allow either endpoint
  63 * to take control of dma as it becomes available.
  64 *
  65 * Note that DMA should not be used on OUT endpoints unless it can
  66 * be guaranteed that no short packets will arrive on an IN endpoint
  67 * while the DMA operation is pending.  Otherwise the OUT DMA will
  68 * terminate prematurely (See NET2272 Errata 630-0213-0101)
  69 */
  70static ushort dma_ep = 1;
  71module_param(dma_ep, ushort, 0644);
  72
  73/*
  74 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
  75 *	mode 0 == Slow DREQ mode
  76 *	mode 1 == Fast DREQ mode
  77 *	mode 2 == Burst mode
  78 */
  79static ushort dma_mode = 2;
  80module_param(dma_mode, ushort, 0644);
  81#else
  82#define use_dma 0
  83#define dma_ep 1
  84#define dma_mode 2
  85#endif
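    /*
     * Editor's note (illustrative, not part of the driver): when
     * CONFIG_USB_NET2272_DMA is enabled and the driver is built as a module,
     * the parameters above can be set at load time or, since they are
     * declared with 0644 permissions, changed later through sysfs, e.g.:
     *
     *     modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2
     *     echo 0 > /sys/module/net2272/parameters/use_dma
     *
     * The module name and sysfs paths shown here are the usual defaults and
     * may differ depending on how the kernel is configured.
     */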
  86
  87/*
  88 * fifo_mode: net2272 buffer configuration:
  89 *      mode 0 == ep-{a,b,c} 512db each
  90 *      mode 1 == ep-a 1k, ep-{b,c} 512db
  91 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
  92 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
  93 */
  94static ushort fifo_mode;
  95module_param(fifo_mode, ushort, 0644);
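    /*
     * Editor's note: fifo_mode is consumed by net2272_set_fifo_mode() further
     * down in this file, which writes it into the top two bits of LOCCTL
     * (mode << 6) and sizes each endpoint's fifo_size to match; values above
     * 3 are treated as 0 by net2272_usb_reset().
     */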
  96
  97/*
  98 * enable_suspend: When enabled, the driver will respond to
  99 * USB suspend requests by powering down the NET2272.  Otherwise,
 100 * USB suspend requests will be ignored.  This is acceptable for
 101 * self-powered devices.  For bus-powered devices, set this to 1.
 102 */
 103static ushort enable_suspend;
 104module_param(enable_suspend, ushort, 0644);
 105
 106static void assert_out_naking(struct net2272_ep *ep, const char *where)
 107{
 108	u8 tmp;
 109
 110#ifndef DEBUG
 111	return;
 112#endif
 113
 114	tmp = net2272_ep_read(ep, EP_STAT0);
 115	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 116		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 117			ep->ep.name, where, tmp);
 118		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 119	}
 120}
 121#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 122
 123static void stop_out_naking(struct net2272_ep *ep)
 124{
 125	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 126
 127	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 128		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 129}
 130
 131#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 132
 133static char *type_string(u8 bmAttributes)
 134{
 135	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 136	case USB_ENDPOINT_XFER_BULK: return "bulk";
 137	case USB_ENDPOINT_XFER_ISOC: return "iso";
 138	case USB_ENDPOINT_XFER_INT:  return "intr";
 139	default:                     return "control";
 140	}
 141}
 142
 143static char *buf_state_string(unsigned state)
 144{
 145	switch (state) {
 146	case BUFF_FREE:  return "free";
 147	case BUFF_VALID: return "valid";
 148	case BUFF_LCL:   return "local";
 149	case BUFF_USB:   return "usb";
 150	default:         return "unknown";
 151	}
 152}
 153
 154static char *dma_mode_string(void)
 155{
 156	if (!use_dma)
 157		return "PIO";
 158	switch (dma_mode) {
 159	case 0:  return "SLOW DREQ";
 160	case 1:  return "FAST DREQ";
 161	case 2:  return "BURST";
 162	default: return "invalid";
 163	}
 164}
 165
 166static void net2272_dequeue_all(struct net2272_ep *);
 167static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 168static int net2272_fifo_status(struct usb_ep *);
 169
 170static const struct usb_ep_ops net2272_ep_ops;
 171
 172/*---------------------------------------------------------------------------*/
 173
 174static int
 175net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 176{
 177	struct net2272 *dev;
 178	struct net2272_ep *ep;
 179	u32 max;
 180	u8 tmp;
 181	unsigned long flags;
 182
 183	ep = container_of(_ep, struct net2272_ep, ep);
 184	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 185			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 186		return -EINVAL;
 187	dev = ep->dev;
 188	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 189		return -ESHUTDOWN;
 190
 191	max = usb_endpoint_maxp(desc);
 192
 193	spin_lock_irqsave(&dev->lock, flags);
 194	_ep->maxpacket = max;
 195	ep->desc = desc;
 196
 197	/* net2272_ep_reset() has already been called */
 198	ep->stopped = 0;
 199	ep->wedged = 0;
 200
 201	/* set speed-dependent max packet */
 202	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 203	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 204
 205	/* set type, direction, address; reset fifo counters */
 206	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 207	tmp = usb_endpoint_type(desc);
 208	if (usb_endpoint_xfer_bulk(desc)) {
 209		/* catch some particularly blatant driver bugs */
 210		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 211		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 212			spin_unlock_irqrestore(&dev->lock, flags);
 213			return -ERANGE;
 214		}
 215	}
 216	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 217	tmp <<= ENDPOINT_TYPE;
 218	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 219	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 220	tmp |= (1 << ENDPOINT_ENABLE);
 221
 222	/* for OUT transfers, block the rx fifo until a read is posted */
 223	ep->is_in = usb_endpoint_dir_in(desc);
 224	if (!ep->is_in)
 225		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 226
 227	net2272_ep_write(ep, EP_CFG, tmp);
 228
 229	/* enable irqs */
 230	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 231	net2272_write(dev, IRQENB0, tmp);
 232
 233	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 234		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 235		| net2272_ep_read(ep, EP_IRQENB);
 236	net2272_ep_write(ep, EP_IRQENB, tmp);
 237
 238	tmp = desc->bEndpointAddress;
 239	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 240		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 241		type_string(desc->bmAttributes), max,
 242		net2272_ep_read(ep, EP_CFG));
 243
 244	spin_unlock_irqrestore(&dev->lock, flags);
 245	return 0;
 246}
 247
 248static void net2272_ep_reset(struct net2272_ep *ep)
 249{
 250	u8 tmp;
 251
 252	ep->desc = NULL;
 253	INIT_LIST_HEAD(&ep->queue);
 254
 255	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 256	ep->ep.ops = &net2272_ep_ops;
 257
 258	/* disable irqs, endpoint */
 259	net2272_ep_write(ep, EP_IRQENB, 0);
 260
 261	/* init to our chosen defaults, notably so that we NAK OUT
 262	 * packets until the driver queues a read.
 263	 */
 264	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 265	net2272_ep_write(ep, EP_RSPSET, tmp);
 266
 267	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 268	if (ep->num != 0)
 269		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 270
 271	net2272_ep_write(ep, EP_RSPCLR, tmp);
 272
 273	/* scrub most status bits, and flush any fifo state */
 274	net2272_ep_write(ep, EP_STAT0,
 275			  (1 << DATA_IN_TOKEN_INTERRUPT)
 276			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 277			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 278			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 279			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 280
 281	net2272_ep_write(ep, EP_STAT1,
 282			    (1 << TIMEOUT)
 283			  | (1 << USB_OUT_ACK_SENT)
 284			  | (1 << USB_OUT_NAK_SENT)
 285			  | (1 << USB_IN_ACK_RCVD)
 286			  | (1 << USB_IN_NAK_SENT)
 287			  | (1 << USB_STALL_SENT)
 288			  | (1 << LOCAL_OUT_ZLP)
 289			  | (1 << BUFFER_FLUSH));
 290
 291	/* fifo size is handled separately */
 292}
 293
 294static int net2272_disable(struct usb_ep *_ep)
 295{
 296	struct net2272_ep *ep;
 297	unsigned long flags;
 298
 299	ep = container_of(_ep, struct net2272_ep, ep);
 300	if (!_ep || !ep->desc || _ep->name == ep0name)
 301		return -EINVAL;
 302
 303	spin_lock_irqsave(&ep->dev->lock, flags);
 304	net2272_dequeue_all(ep);
 305	net2272_ep_reset(ep);
 306
 307	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 308
 309	spin_unlock_irqrestore(&ep->dev->lock, flags);
 310	return 0;
 311}
 312
 313/*---------------------------------------------------------------------------*/
 314
 315static struct usb_request *
 316net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 317{
 318	struct net2272_request *req;
 319
 320	if (!_ep)
 321		return NULL;
 322
 323	req = kzalloc(sizeof(*req), gfp_flags);
 324	if (!req)
 325		return NULL;
 326
 327	INIT_LIST_HEAD(&req->queue);
 328
 329	return &req->req;
 330}
 331
 332static void
 333net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 334{
 335	struct net2272_request *req;
 336
 337	if (!_ep || !_req)
 338		return;
 339
 340	req = container_of(_req, struct net2272_request, req);
 341	WARN_ON(!list_empty(&req->queue));
 342	kfree(req);
 343}
 344
 345static void
 346net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 347{
 348	struct net2272 *dev;
 349	unsigned stopped = ep->stopped;
 350
 351	if (ep->num == 0) {
 352		if (ep->dev->protocol_stall) {
 353			ep->stopped = 1;
 354			set_halt(ep);
 355		}
 356		allow_status(ep);
 357	}
 358
 359	list_del_init(&req->queue);
 360
 361	if (req->req.status == -EINPROGRESS)
 362		req->req.status = status;
 363	else
 364		status = req->req.status;
 365
 366	dev = ep->dev;
 367	if (use_dma && ep->dma)
 368		usb_gadget_unmap_request(&dev->gadget, &req->req,
 369				ep->is_in);
 370
 371	if (status && status != -ESHUTDOWN)
 372		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 373			ep->ep.name, &req->req, status,
 374			req->req.actual, req->req.length, req->req.buf);
 375
 376	/* don't modify queue heads during completion callback */
 377	ep->stopped = 1;
 378	spin_unlock(&dev->lock);
 379	usb_gadget_giveback_request(&ep->ep, &req->req);
 380	spin_lock(&dev->lock);
 381	ep->stopped = stopped;
 382}
 383
 384static int
 385net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 386	struct net2272_request *req, unsigned max)
 387{
 388	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 389	u16 *bufp;
 390	unsigned length, count;
 391	u8 tmp;
 392
 393	length = min(req->req.length - req->req.actual, max);
 394	req->req.actual += length;
 395
 396	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 397		ep->ep.name, req, max, length,
 398		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 399
 400	count = length;
 401	bufp = (u16 *)buf;
 402
 403	while (likely(count >= 2)) {
 404		/* no byte-swap required; chip endian set during init */
 405		writew(*bufp++, ep_data);
 406		count -= 2;
 407	}
 408	buf = (u8 *)bufp;
 409
 410	/* write final byte by placing the NET2272 into 8-bit mode */
 411	if (unlikely(count)) {
 412		tmp = net2272_read(ep->dev, LOCCTL);
 413		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 414		writeb(*buf, ep_data);
 415		net2272_write(ep->dev, LOCCTL, tmp);
 416	}
 417	return length;
 418}
 419
 420/* returns: 0: still running, 1: completed, negative: errno */
 421static int
 422net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 423{
 424	u8 *buf;
 425	unsigned count, max;
 426	int status;
 427
 428	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 429		ep->ep.name, req->req.actual, req->req.length);
 430
 431	/*
 432	 * Keep loading the endpoint until the final packet is loaded,
 433	 * or the endpoint buffer is full.
 434	 */
 435 top:
 436	/*
 437	 * Clear interrupt status
 438	 *  - Packet Transmitted interrupt will become set again when the
 439	 *    host successfully takes another packet
 440	 */
 441	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 442	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 443		buf = req->req.buf + req->req.actual;
 444		prefetch(buf);
 445
 446		/* force pagesel */
 447		net2272_ep_read(ep, EP_STAT0);
 448
 449		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 450			(net2272_ep_read(ep, EP_AVAIL0));
 451
 452		if (max < ep->ep.maxpacket)
 453			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 454				| (net2272_ep_read(ep, EP_AVAIL0));
 455
 456		count = net2272_write_packet(ep, buf, req, max);
 457		/* see if we are done */
 458		if (req->req.length == req->req.actual) {
 459			/* validate short or zlp packet */
 460			if (count < ep->ep.maxpacket)
 461				set_fifo_bytecount(ep, 0);
 462			net2272_done(ep, req, 0);
 463
 464			if (!list_empty(&ep->queue)) {
 465				req = list_entry(ep->queue.next,
 466						struct net2272_request,
 467						queue);
 468				status = net2272_kick_dma(ep, req);
 469
 470				if (status < 0)
 471					if ((net2272_ep_read(ep, EP_STAT0)
 472							& (1 << BUFFER_EMPTY)))
 473						goto top;
 474			}
 475			return 1;
 476		}
 477		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 478	}
 479	return 0;
 480}
 481
 482static void
 483net2272_out_flush(struct net2272_ep *ep)
 484{
 485	ASSERT_OUT_NAKING(ep);
 486
 487	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 488			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 489	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 490}
 491
 492static int
 493net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 494	struct net2272_request *req, unsigned avail)
 495{
 496	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 497	unsigned is_short;
 498	u16 *bufp;
 499
 500	req->req.actual += avail;
 501
 502	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 503		ep->ep.name, req, avail,
 504		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 505
 506	is_short = (avail < ep->ep.maxpacket);
 507
 508	if (unlikely(avail == 0)) {
 509		/* remove any zlp from the buffer */
 510		(void)readw(ep_data);
 511		return is_short;
 512	}
 513
 514	/* Ensure we get the final byte */
 515	if (unlikely(avail % 2))
 516		avail++;
 517	bufp = (u16 *)buf;
 518
 519	do {
 520		*bufp++ = readw(ep_data);
 521		avail -= 2;
 522	} while (avail);
 523
 524	/*
 525	 * To avoid a false endpoint-available race condition, EP_STAT0 must
 526	 * be read twice in the case of a short transfer
 527	 */
 528	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 529		net2272_ep_read(ep, EP_STAT0);
 530
 531	return is_short;
 532}
 533
 534static int
 535net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 536{
 537	u8 *buf;
 538	unsigned is_short;
 539	int count;
 540	int tmp;
 541	int cleanup = 0;
 542
 543	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 544		ep->ep.name, req->req.actual, req->req.length);
 545
 546 top:
 547	do {
 548		buf = req->req.buf + req->req.actual;
 549		prefetchw(buf);
 550
 551		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 552			| net2272_ep_read(ep, EP_AVAIL0);
 553
 554		net2272_ep_write(ep, EP_STAT0,
 555			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 556			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 557
 558		tmp = req->req.length - req->req.actual;
 559
 560		if (count > tmp) {
 561			if ((tmp % ep->ep.maxpacket) != 0) {
 562				dev_err(ep->dev->dev,
 563					"%s out fifo %d bytes, expected %d\n",
 564					ep->ep.name, count, tmp);
 565				cleanup = 1;
 566			}
 567			count = (tmp > 0) ? tmp : 0;
 568		}
 569
 570		is_short = net2272_read_packet(ep, buf, req, count);
 571
 572		/* completion */
 573		if (unlikely(cleanup || is_short ||
 574				req->req.actual == req->req.length)) {
 575
 576			if (cleanup) {
 577				net2272_out_flush(ep);
 578				net2272_done(ep, req, -EOVERFLOW);
 579			} else
 580				net2272_done(ep, req, 0);
 581
 582			/* re-initialize endpoint transfer registers;
 583			 * otherwise they may result in erroneous pre-validation
 584			 * for subsequent control reads
 585			 */
 586			if (unlikely(ep->num == 0)) {
 587				net2272_ep_write(ep, EP_TRANSFER2, 0);
 588				net2272_ep_write(ep, EP_TRANSFER1, 0);
 589				net2272_ep_write(ep, EP_TRANSFER0, 0);
 590			}
 591
 592			if (!list_empty(&ep->queue)) {
 593				int status;
 594
 595				req = list_entry(ep->queue.next,
 596					struct net2272_request, queue);
 597				status = net2272_kick_dma(ep, req);
 598				if ((status < 0) &&
 599				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 600					goto top;
 601			}
 602			return 1;
 603		}
 604	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 605
 606	return 0;
 607}
 608
 609static void
 610net2272_pio_advance(struct net2272_ep *ep)
 611{
 612	struct net2272_request *req;
 613
 614	if (unlikely(list_empty(&ep->queue)))
 615		return;
 616
 617	req = list_entry(ep->queue.next, struct net2272_request, queue);
 618	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 619}
 620
 621/* returns 0 on success, else negative errno */
 622static int
 623net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 624	unsigned len, unsigned dir)
 625{
 626	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 627		ep, buf, len, dir);
 628
 629	/* The NET2272 only supports a single dma channel */
 630	if (dev->dma_busy)
 631		return -EBUSY;
 632	/*
 633	 * EP_TRANSFER (used to determine the number of bytes received
 634	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 635	 */
 636	if ((dir == 1) && (len > 0x1000000))
 637		return -EINVAL;
 638
 639	dev->dma_busy = 1;
 640
 641	/* initialize platform's dma */
 642#ifdef CONFIG_USB_PCI
 643	/* NET2272 addr, buffer addr, length, etc. */
 644	switch (dev->dev_id) {
 645	case PCI_DEVICE_ID_RDK1:
 646		/* Setup PLX 9054 DMA mode */
 647		writel((1 << LOCAL_BUS_WIDTH) |
 648			(1 << TA_READY_INPUT_ENABLE) |
 649			(0 << LOCAL_BURST_ENABLE) |
 650			(1 << DONE_INTERRUPT_ENABLE) |
 651			(1 << LOCAL_ADDRESSING_MODE) |
 652			(1 << DEMAND_MODE) |
 653			(1 << DMA_EOT_ENABLE) |
 654			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 655			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 656			dev->rdk1.plx9054_base_addr + DMAMODE0);
 657
 658		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 659		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 660		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 661		writel((dir << DIRECTION_OF_TRANSFER) |
 662			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 663			dev->rdk1.plx9054_base_addr + DMADPR0);
 664		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 665			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 666			dev->rdk1.plx9054_base_addr + INTCSR);
 667
 668		break;
 669	}
 670#endif
 671
 672	net2272_write(dev, DMAREQ,
 673		(0 << DMA_BUFFER_VALID) |
 674		(1 << DMA_REQUEST_ENABLE) |
 675		(1 << DMA_CONTROL_DACK) |
 676		(dev->dma_eot_polarity << EOT_POLARITY) |
 677		(dev->dma_dack_polarity << DACK_POLARITY) |
 678		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 679		((ep >> 1) << DMA_ENDPOINT_SELECT));
 680
 681	(void) net2272_read(dev, SCRATCH);
 682
 683	return 0;
 684}
 685
 686static void
 687net2272_start_dma(struct net2272 *dev)
 688{
 689	/* start platform's dma controller */
 690#ifdef CONFIG_USB_PCI
 691	switch (dev->dev_id) {
 692	case PCI_DEVICE_ID_RDK1:
 693		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 694			dev->rdk1.plx9054_base_addr + DMACSR0);
 695		break;
 696	}
 697#endif
 698}
 699
 700/* returns 0 on success, else negative errno */
 701static int
 702net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 703{
 704	unsigned size;
 705	u8 tmp;
 706
 707	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 708		return -EINVAL;
 709
 710	/* don't use dma for odd-length transfers;
 711	 * otherwise, we'd need to deal with the last byte with pio
 712	 */
 713	if (req->req.length & 1)
 714		return -EINVAL;
 715
 716	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 717		ep->ep.name, req, (unsigned long long) req->req.dma);
 718
 719	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 720
 721	/* The NET2272 can only use DMA on one endpoint at a time */
 722	if (ep->dev->dma_busy)
 723		return -EBUSY;
 724
 725	/* Make sure we only DMA an even number of bytes (we'll use
 726	 * pio to complete the transfer)
 727	 */
 728	size = req->req.length;
 729	size &= ~1;
 730
 731	/* device-to-host transfer */
 732	if (ep->is_in) {
 733		/* initialize platform's dma controller */
 734		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 735			/* unable to obtain DMA channel; return error and use pio mode */
 736			return -EBUSY;
 737		req->req.actual += size;
 738
 739	/* host-to-device transfer */
 740	} else {
 741		tmp = net2272_ep_read(ep, EP_STAT0);
 742
 743		/* initialize platform's dma controller */
 744		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 745			/* unable to obtain DMA channel; return error and use pio mode */
 746			return -EBUSY;
 747
 748		if (!(tmp & (1 << BUFFER_EMPTY)))
 749			ep->not_empty = 1;
 750		else
 751			ep->not_empty = 0;
 752
 753
 754		/* allow the endpoint's buffer to fill */
 755		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 756
 757		/* this transfer completed and the data is already in the fifo;
 758		 * return an error so pio gets used.
 759		 */
 760		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 761
 762			/* deassert dreq */
 763			net2272_write(ep->dev, DMAREQ,
 764				(0 << DMA_BUFFER_VALID) |
 765				(0 << DMA_REQUEST_ENABLE) |
 766				(1 << DMA_CONTROL_DACK) |
 767				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 768				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 769				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 770				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 771
 772			return -EBUSY;
 773		}
 774	}
 775
 776	/* Don't use per-packet interrupts: use dma interrupts only */
 777	net2272_ep_write(ep, EP_IRQENB, 0);
 778
 779	net2272_start_dma(ep->dev);
 780
 781	return 0;
 782}
 783
 784static void net2272_cancel_dma(struct net2272 *dev)
 785{
 786#ifdef CONFIG_USB_PCI
 787	switch (dev->dev_id) {
 788	case PCI_DEVICE_ID_RDK1:
 789		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 790		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 791		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 792		         (1 << CHANNEL_DONE)))
 793			continue;	/* wait for dma to stabilize */
 794
 795		/* dma abort generates an interrupt */
 796		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 797			dev->rdk1.plx9054_base_addr + DMACSR0);
 798		break;
 799	}
 800#endif
 801
 802	dev->dma_busy = 0;
 803}
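    /*
     * Editor's note: as the use_dma comment near the top of the file says,
     * net2272_request_dma(), net2272_start_dma() and net2272_cancel_dma()
     * are the platform-specific hooks.  A port to a board other than the
     * RDK1 would presumably add its own case on dev->dev_id alongside
     * PCI_DEVICE_ID_RDK1, programming its DMA engine with the same
     * buf/len/dir arguments that the RDK1 code hands to the PLX 9054.
     */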
 804
 805/*---------------------------------------------------------------------------*/
 806
 807static int
 808net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 809{
 810	struct net2272_request *req;
 811	struct net2272_ep *ep;
 812	struct net2272 *dev;
 813	unsigned long flags;
 814	int status = -1;
 815	u8 s;
 816
 817	req = container_of(_req, struct net2272_request, req);
 818	if (!_req || !_req->complete || !_req->buf
 819			|| !list_empty(&req->queue))
 820		return -EINVAL;
 821	ep = container_of(_ep, struct net2272_ep, ep);
 822	if (!_ep || (!ep->desc && ep->num != 0))
 823		return -EINVAL;
 824	dev = ep->dev;
 825	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 826		return -ESHUTDOWN;
 827
 828	/* set up dma mapping in case the caller didn't */
 829	if (use_dma && ep->dma) {
 830		status = usb_gadget_map_request(&dev->gadget, _req,
 831				ep->is_in);
 832		if (status)
 833			return status;
 834	}
 835
 836	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 837		_ep->name, _req, _req->length, _req->buf,
 838		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 839
 840	spin_lock_irqsave(&dev->lock, flags);
 841
 842	_req->status = -EINPROGRESS;
 843	_req->actual = 0;
 844
 845	/* kickstart this i/o queue? */
 846	if (list_empty(&ep->queue) && !ep->stopped) {
 847		/* maybe there's no control data, just status ack */
 848		if (ep->num == 0 && _req->length == 0) {
 849			net2272_done(ep, req, 0);
 850			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 851			goto done;
 852		}
 853
 854		/* Return zlp, don't let it block subsequent packets */
 855		s = net2272_ep_read(ep, EP_STAT0);
 856		if (s & (1 << BUFFER_EMPTY)) {
 857			/* Buffer is empty; check for a blocking zlp and handle it */
 858			if ((s & (1 << NAK_OUT_PACKETS)) &&
 859			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 860				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 861				/*
 862				 * Request is going to terminate with a short packet ...
 863				 * hope the client is ready for it!
 864				 */
 865				status = net2272_read_fifo(ep, req);
 866				/* clear short packet naking */
 867				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 868				goto done;
 869			}
 870		}
 871
 872		/* try dma first */
 873		status = net2272_kick_dma(ep, req);
 874
 875		if (status < 0) {
 876			/* dma failed (most likely in use by another endpoint);
 877			 * fall back to pio
 878			 */
 879			status = 0;
 880
 881			if (ep->is_in)
 882				status = net2272_write_fifo(ep, req);
 883			else {
 884				s = net2272_ep_read(ep, EP_STAT0);
 885				if ((s & (1 << BUFFER_EMPTY)) == 0)
 886					status = net2272_read_fifo(ep, req);
 887			}
 888
 889			if (unlikely(status != 0)) {
 890				if (status > 0)
 891					status = 0;
 892				req = NULL;
 893			}
 894		}
 895	}
 896	if (likely(req))
 897		list_add_tail(&req->queue, &ep->queue);
 898
 899	if (likely(!list_empty(&ep->queue)))
 900		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 901 done:
 902	spin_unlock_irqrestore(&dev->lock, flags);
 903
 904	return 0;
 905}
 906
 907/* dequeue ALL requests */
 908static void
 909net2272_dequeue_all(struct net2272_ep *ep)
 910{
 911	struct net2272_request *req;
 912
 913	/* called with spinlock held */
 914	ep->stopped = 1;
 915
 916	while (!list_empty(&ep->queue)) {
 917		req = list_entry(ep->queue.next,
 918				struct net2272_request,
 919				queue);
 920		net2272_done(ep, req, -ESHUTDOWN);
 921	}
 922}
 923
 924/* dequeue JUST ONE request */
 925static int
 926net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 927{
 928	struct net2272_ep *ep;
 929	struct net2272_request *req = NULL, *iter;
 930	unsigned long flags;
 931	int stopped;
 932
 933	ep = container_of(_ep, struct net2272_ep, ep);
 934	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 935		return -EINVAL;
 936
 937	spin_lock_irqsave(&ep->dev->lock, flags);
 938	stopped = ep->stopped;
 939	ep->stopped = 1;
 940
 941	/* make sure it's still queued on this endpoint */
 942	list_for_each_entry(iter, &ep->queue, queue) {
 943		if (&iter->req != _req)
 944			continue;
 945		req = iter;
 946		break;
 947	}
 948	if (!req) {
 949		ep->stopped = stopped;
 950		spin_unlock_irqrestore(&ep->dev->lock, flags);
 951		return -EINVAL;
 952	}
 953
 954	/* queue head may be partially complete */
 955	if (ep->queue.next == &req->queue) {
 956		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 957		net2272_done(ep, req, -ECONNRESET);
 958	}
 959	ep->stopped = stopped;
 960
 961	spin_unlock_irqrestore(&ep->dev->lock, flags);
 962	return 0;
 963}
 964
 965/*---------------------------------------------------------------------------*/
 966
 967static int
 968net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 969{
 970	struct net2272_ep *ep;
 971	unsigned long flags;
 972	int ret = 0;
 973
 974	ep = container_of(_ep, struct net2272_ep, ep);
 975	if (!_ep || (!ep->desc && ep->num != 0))
 976		return -EINVAL;
 977	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 978		return -ESHUTDOWN;
 979	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 980		return -EINVAL;
 981
 982	spin_lock_irqsave(&ep->dev->lock, flags);
 983	if (!list_empty(&ep->queue))
 984		ret = -EAGAIN;
 985	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
 986		ret = -EAGAIN;
 987	else {
 988		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
 989			value ? "set" : "clear",
 990			wedged ? "wedge" : "halt");
 991		/* set/clear */
 992		if (value) {
 993			if (ep->num == 0)
 994				ep->dev->protocol_stall = 1;
 995			else
 996				set_halt(ep);
 997			if (wedged)
 998				ep->wedged = 1;
 999		} else {
1000			clear_halt(ep);
1001			ep->wedged = 0;
1002		}
1003	}
1004	spin_unlock_irqrestore(&ep->dev->lock, flags);
1005
1006	return ret;
1007}
1008
1009static int
1010net2272_set_halt(struct usb_ep *_ep, int value)
1011{
1012	return net2272_set_halt_and_wedge(_ep, value, 0);
1013}
1014
1015static int
1016net2272_set_wedge(struct usb_ep *_ep)
1017{
1018	if (!_ep || _ep->name == ep0name)
1019		return -EINVAL;
1020	return net2272_set_halt_and_wedge(_ep, 1, 1);
1021}
1022
1023static int
1024net2272_fifo_status(struct usb_ep *_ep)
1025{
1026	struct net2272_ep *ep;
1027	u16 avail;
1028
1029	ep = container_of(_ep, struct net2272_ep, ep);
1030	if (!_ep || (!ep->desc && ep->num != 0))
1031		return -ENODEV;
1032	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1033		return -ESHUTDOWN;
1034
1035	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1036	avail |= net2272_ep_read(ep, EP_AVAIL0);
1037	if (avail > ep->fifo_size)
1038		return -EOVERFLOW;
1039	if (ep->is_in)
1040		avail = ep->fifo_size - avail;
1041	return avail;
1042}
1043
1044static void
1045net2272_fifo_flush(struct usb_ep *_ep)
1046{
1047	struct net2272_ep *ep;
1048
1049	ep = container_of(_ep, struct net2272_ep, ep);
1050	if (!_ep || (!ep->desc && ep->num != 0))
1051		return;
1052	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1053		return;
1054
1055	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1056}
1057
1058static const struct usb_ep_ops net2272_ep_ops = {
1059	.enable        = net2272_enable,
1060	.disable       = net2272_disable,
1061
1062	.alloc_request = net2272_alloc_request,
1063	.free_request  = net2272_free_request,
1064
1065	.queue         = net2272_queue,
1066	.dequeue       = net2272_dequeue,
1067
1068	.set_halt      = net2272_set_halt,
1069	.set_wedge     = net2272_set_wedge,
1070	.fifo_status   = net2272_fifo_status,
1071	.fifo_flush    = net2272_fifo_flush,
1072};
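    /*
     * Editor's note (usage sketch, not part of this file): gadget function
     * drivers never call these ops directly; they use the generic usb_ep
     * helpers from <linux/usb/gadget.h>, which dispatch to the net2272_*
     * handlers above.  Roughly:
     *
     *     struct usb_request *req;
     *
     *     usb_ep_enable(ep);                          // -> net2272_enable()
     *     req = usb_ep_alloc_request(ep, GFP_KERNEL); // -> net2272_alloc_request()
     *     req->buf = buf;
     *     req->length = len;
     *     req->complete = my_complete;                // hypothetical callback
     *     usb_ep_queue(ep, req, GFP_ATOMIC);          // -> net2272_queue()
     *
     * "ep", "buf", "len" and "my_complete" are placeholders supplied by the
     * function driver; error handling is omitted.
     */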
1073
1074/*---------------------------------------------------------------------------*/
1075
1076static int
1077net2272_get_frame(struct usb_gadget *_gadget)
1078{
1079	struct net2272 *dev;
1080	unsigned long flags;
1081	u16 ret;
1082
1083	if (!_gadget)
1084		return -ENODEV;
1085	dev = container_of(_gadget, struct net2272, gadget);
1086	spin_lock_irqsave(&dev->lock, flags);
1087
1088	ret = net2272_read(dev, FRAME1) << 8;
1089	ret |= net2272_read(dev, FRAME0);
1090
1091	spin_unlock_irqrestore(&dev->lock, flags);
1092	return ret;
1093}
1094
1095static int
1096net2272_wakeup(struct usb_gadget *_gadget)
1097{
1098	struct net2272 *dev;
1099	u8 tmp;
1100	unsigned long flags;
1101
1102	if (!_gadget)
1103		return 0;
1104	dev = container_of(_gadget, struct net2272, gadget);
1105
1106	spin_lock_irqsave(&dev->lock, flags);
1107	tmp = net2272_read(dev, USBCTL0);
1108	if (tmp & (1 << IO_WAKEUP_ENABLE))
1109		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1110
1111	spin_unlock_irqrestore(&dev->lock, flags);
1112
1113	return 0;
1114}
1115
1116static int
1117net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1118{
1119	if (!_gadget)
1120		return -ENODEV;
1121
1122	_gadget->is_selfpowered = (value != 0);
1123
1124	return 0;
1125}
1126
1127static int
1128net2272_pullup(struct usb_gadget *_gadget, int is_on)
1129{
1130	struct net2272 *dev;
1131	u8 tmp;
1132	unsigned long flags;
1133
1134	if (!_gadget)
1135		return -ENODEV;
1136	dev = container_of(_gadget, struct net2272, gadget);
1137
1138	spin_lock_irqsave(&dev->lock, flags);
1139	tmp = net2272_read(dev, USBCTL0);
1140	dev->softconnect = (is_on != 0);
1141	if (is_on)
1142		tmp |= (1 << USB_DETECT_ENABLE);
1143	else
1144		tmp &= ~(1 << USB_DETECT_ENABLE);
1145	net2272_write(dev, USBCTL0, tmp);
1146	spin_unlock_irqrestore(&dev->lock, flags);
1147
1148	return 0;
1149}
1150
1151static int net2272_start(struct usb_gadget *_gadget,
1152		struct usb_gadget_driver *driver);
1153static int net2272_stop(struct usb_gadget *_gadget);
1154static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
1155
1156static const struct usb_gadget_ops net2272_ops = {
1157	.get_frame	= net2272_get_frame,
1158	.wakeup		= net2272_wakeup,
1159	.set_selfpowered = net2272_set_selfpowered,
1160	.pullup		= net2272_pullup,
1161	.udc_start	= net2272_start,
1162	.udc_stop	= net2272_stop,
1163	.udc_async_callbacks = net2272_async_callbacks,
1164};
1165
1166/*---------------------------------------------------------------------------*/
1167
1168static ssize_t
1169registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1170{
1171	struct net2272 *dev;
1172	char *next;
1173	unsigned size, t;
1174	unsigned long flags;
1175	u8 t1, t2;
1176	int i;
1177	const char *s;
1178
1179	dev = dev_get_drvdata(_dev);
1180	next = buf;
1181	size = PAGE_SIZE;
1182	spin_lock_irqsave(&dev->lock, flags);
1183
1184	/* Main Control Registers */
1185	t = scnprintf(next, size, "%s version %s, "
1186		"chiprev %02x, locctl %02x\n"
1187		"irqenb0 %02x irqenb1 %02x "
1188		"irqstat0 %02x irqstat1 %02x\n",
1189		driver_name, driver_vers, dev->chiprev,
1190		net2272_read(dev, LOCCTL),
1191		net2272_read(dev, IRQENB0),
1192		net2272_read(dev, IRQENB1),
1193		net2272_read(dev, IRQSTAT0),
1194		net2272_read(dev, IRQSTAT1));
1195	size -= t;
1196	next += t;
1197
1198	/* DMA */
1199	t1 = net2272_read(dev, DMAREQ);
1200	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1201		t1, ep_name[(t1 & 0x01) + 1],
1202		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1203		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1204		t1 & (1 << DMA_REQUEST) ? "req " : "",
1205		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1206	size -= t;
1207	next += t;
1208
1209	/* USB Control Registers */
1210	t1 = net2272_read(dev, USBCTL1);
1211	if (t1 & (1 << VBUS_PIN)) {
1212		if (t1 & (1 << USB_HIGH_SPEED))
1213			s = "high speed";
1214		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1215			s = "powered";
1216		else
1217			s = "full speed";
1218	} else
1219		s = "not attached";
1220	t = scnprintf(next, size,
1221		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1222		net2272_read(dev, USBCTL0), t1,
1223		net2272_read(dev, OURADDR), s);
1224	size -= t;
1225	next += t;
1226
1227	/* Endpoint Registers */
1228	for (i = 0; i < 4; ++i) {
1229		struct net2272_ep *ep;
1230
1231		ep = &dev->ep[i];
1232		if (i && !ep->desc)
1233			continue;
1234
1235		t1 = net2272_ep_read(ep, EP_CFG);
1236		t2 = net2272_ep_read(ep, EP_RSPSET);
1237		t = scnprintf(next, size,
1238			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1239			"irqenb %02x\n",
1240			ep->ep.name, t1, t2,
1241			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1242			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1243			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1244			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1245			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1246			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1247			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1248			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1249			net2272_ep_read(ep, EP_IRQENB));
1250		size -= t;
1251		next += t;
1252
1253		t = scnprintf(next, size,
1254			"\tstat0 %02x stat1 %02x avail %04x "
1255			"(ep%d%s-%s)%s\n",
1256			net2272_ep_read(ep, EP_STAT0),
1257			net2272_ep_read(ep, EP_STAT1),
1258			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1259			t1 & 0x0f,
1260			ep->is_in ? "in" : "out",
1261			type_string(t1 >> 5),
1262			ep->stopped ? "*" : "");
1263		size -= t;
1264		next += t;
1265
1266		t = scnprintf(next, size,
1267			"\tep_transfer %06x\n",
1268			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1269			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1270			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1271		size -= t;
1272		next += t;
1273
1274		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1275		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1276		t = scnprintf(next, size,
1277			"\tbuf-a %s buf-b %s\n",
1278			buf_state_string(t1),
1279			buf_state_string(t2));
1280		size -= t;
1281		next += t;
1282	}
1283
1284	spin_unlock_irqrestore(&dev->lock, flags);
1285
1286	return PAGE_SIZE - size;
1287}
1288static DEVICE_ATTR_RO(registers);
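    /*
     * Editor's note: DEVICE_ATTR_RO(registers) pairs registers_show() with a
     * read-only sysfs attribute named "registers".  Assuming the probe code
     * (outside this excerpt) registers it with device_create_file(), the
     * dump can be read with something like
     *     cat /sys/devices/.../registers
     * where the exact path depends on whether the controller sits on the
     * PCI or the platform bus.
     */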
1289
1290/*---------------------------------------------------------------------------*/
1291
1292static void
1293net2272_set_fifo_mode(struct net2272 *dev, int mode)
1294{
1295	u8 tmp;
1296
1297	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1298	tmp |= (mode << 6);
1299	net2272_write(dev, LOCCTL, tmp);
1300
1301	INIT_LIST_HEAD(&dev->gadget.ep_list);
1302
1303	/* always ep-a, ep-c ... maybe not ep-b */
1304	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1305
1306	switch (mode) {
1307	case 0:
1308		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1309		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1310		break;
1311	case 1:
1312		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1313		dev->ep[1].fifo_size = 1024;
1314		dev->ep[2].fifo_size = 512;
1315		break;
1316	case 2:
1317		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1318		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1319		break;
1320	case 3:
1321		dev->ep[1].fifo_size = 1024;
1322		break;
1323	}
1324
1325	/* ep-c always has two 512-byte buffers */
1326	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1327	dev->ep[3].fifo_size = 512;
1328}
1329
1330/*---------------------------------------------------------------------------*/
1331
1332static void
1333net2272_usb_reset(struct net2272 *dev)
1334{
1335	dev->gadget.speed = USB_SPEED_UNKNOWN;
1336
1337	net2272_cancel_dma(dev);
1338
1339	net2272_write(dev, IRQENB0, 0);
1340	net2272_write(dev, IRQENB1, 0);
1341
1342	/* clear irq state */
1343	net2272_write(dev, IRQSTAT0, 0xff);
1344	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1345
1346	net2272_write(dev, DMAREQ,
1347		(0 << DMA_BUFFER_VALID) |
1348		(0 << DMA_REQUEST_ENABLE) |
1349		(1 << DMA_CONTROL_DACK) |
1350		(dev->dma_eot_polarity << EOT_POLARITY) |
1351		(dev->dma_dack_polarity << DACK_POLARITY) |
1352		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1353		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1354
1355	net2272_cancel_dma(dev);
1356	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1357
1358	/* Set the NET2272 ep fifo data width to 16-bit mode.  For correct byte ordering,
1359	 * note that the higher-level gadget drivers are expected to convert data to little endian.
1360	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1361	 */
1362	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
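    	/*
    	 * Editor's note (illustrative): a platform that does need the swap
    	 * would presumably extend the write above with the BYTE_SWAP bit
    	 * named in the comment, e.g.
    	 *	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL)
    	 *		| (1 << DATA_WIDTH) | (1 << BYTE_SWAP));
    	 * whether that is required depends on the local bus wiring.
    	 */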
1363	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1364}
1365
1366static void
1367net2272_usb_reinit(struct net2272 *dev)
1368{
1369	int i;
1370
1371	/* basic endpoint init */
1372	for (i = 0; i < 4; ++i) {
1373		struct net2272_ep *ep = &dev->ep[i];
1374
1375		ep->ep.name = ep_name[i];
1376		ep->dev = dev;
1377		ep->num = i;
1378		ep->not_empty = 0;
1379
1380		if (use_dma && ep->num == dma_ep)
1381			ep->dma = 1;
1382
1383		if (i > 0 && i <= 3)
1384			ep->fifo_size = 512;
1385		else
1386			ep->fifo_size = 64;
1387		net2272_ep_reset(ep);
1388
1389		if (i == 0) {
1390			ep->ep.caps.type_control = true;
1391		} else {
1392			ep->ep.caps.type_iso = true;
1393			ep->ep.caps.type_bulk = true;
1394			ep->ep.caps.type_int = true;
1395		}
1396
1397		ep->ep.caps.dir_in = true;
1398		ep->ep.caps.dir_out = true;
1399	}
1400	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1401
1402	dev->gadget.ep0 = &dev->ep[0].ep;
1403	dev->ep[0].stopped = 0;
1404	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1405}
1406
1407static void
1408net2272_ep0_start(struct net2272 *dev)
1409{
1410	struct net2272_ep *ep0 = &dev->ep[0];
1411
1412	net2272_ep_write(ep0, EP_RSPSET,
1413		(1 << NAK_OUT_PACKETS_MODE) |
1414		(1 << ALT_NAK_OUT_PACKETS));
1415	net2272_ep_write(ep0, EP_RSPCLR,
1416		(1 << HIDE_STATUS_PHASE) |
1417		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1418	net2272_write(dev, USBCTL0,
1419		(dev->softconnect << USB_DETECT_ENABLE) |
1420		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1421		(1 << IO_WAKEUP_ENABLE));
1422	net2272_write(dev, IRQENB0,
1423		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1424		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1425		(1 << DMA_DONE_INTERRUPT_ENABLE));
1426	net2272_write(dev, IRQENB1,
1427		(1 << VBUS_INTERRUPT_ENABLE) |
1428		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1429		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1430}
1431
1432/* when a driver is successfully registered, it will receive
1433 * control requests including set_configuration(), which enables
1434 * non-control requests.  then usb traffic follows until a
1435 * disconnect is reported.  then a host may connect again, or
1436 * the driver might get unbound.
1437 */
1438static int net2272_start(struct usb_gadget *_gadget,
1439		struct usb_gadget_driver *driver)
1440{
1441	struct net2272 *dev;
1442	unsigned i;
1443
1444	if (!driver || !driver->setup ||
1445	    driver->max_speed != USB_SPEED_HIGH)
1446		return -EINVAL;
1447
1448	dev = container_of(_gadget, struct net2272, gadget);
1449
1450	for (i = 0; i < 4; ++i)
1451		dev->ep[i].irqs = 0;
1452	/* hook up the driver ... */
1453	dev->softconnect = 1;
1454	dev->driver = driver;
1455
1456	/* ... then enable host detection and ep0; and we're ready
1457	 * for set_configuration as well as eventual disconnect.
1458	 */
1459	net2272_ep0_start(dev);
1460
1461	return 0;
1462}
1463
1464static void
1465stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1466{
1467	int i;
1468
1469	/* don't disconnect if it's not connected */
1470	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1471		driver = NULL;
1472
1473	/* stop hardware; prevent new request submissions;
1474	 * and kill any outstanding requests.
1475	 */
1476	net2272_usb_reset(dev);
1477	for (i = 0; i < 4; ++i)
1478		net2272_dequeue_all(&dev->ep[i]);
1479
1480	/* report disconnect; the driver is already quiesced */
1481	if (dev->async_callbacks && driver) {
1482		spin_unlock(&dev->lock);
1483		driver->disconnect(&dev->gadget);
1484		spin_lock(&dev->lock);
1485	}
1486
1487	net2272_usb_reinit(dev);
1488}
1489
1490static int net2272_stop(struct usb_gadget *_gadget)
1491{
1492	struct net2272 *dev;
1493	unsigned long flags;
1494
1495	dev = container_of(_gadget, struct net2272, gadget);
1496
1497	spin_lock_irqsave(&dev->lock, flags);
1498	stop_activity(dev, NULL);
1499	spin_unlock_irqrestore(&dev->lock, flags);
1500
1501	dev->driver = NULL;
1502
1503	return 0;
1504}
1505
1506static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
1507{
1508	struct net2272	*dev = container_of(_gadget, struct net2272, gadget);
1509
1510	spin_lock_irq(&dev->lock);
1511	dev->async_callbacks = enable;
1512	spin_unlock_irq(&dev->lock);
1513}
1514
1515/*---------------------------------------------------------------------------*/
1516/* handle ep-a/ep-b dma completions */
1517static void
1518net2272_handle_dma(struct net2272_ep *ep)
1519{
1520	struct net2272_request *req;
1521	unsigned len;
1522	int status;
1523
1524	if (!list_empty(&ep->queue))
1525		req = list_entry(ep->queue.next,
1526				struct net2272_request, queue);
1527	else
1528		req = NULL;
1529
1530	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1531
1532	/* Ensure DREQ is de-asserted */
1533	net2272_write(ep->dev, DMAREQ,
1534		(0 << DMA_BUFFER_VALID)
1535	      | (0 << DMA_REQUEST_ENABLE)
1536	      | (1 << DMA_CONTROL_DACK)
1537	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1538	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1539	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1540	      | (ep->dma << DMA_ENDPOINT_SELECT));
1541
1542	ep->dev->dma_busy = 0;
1543
1544	net2272_ep_write(ep, EP_IRQENB,
1545		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1546		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1547		| net2272_ep_read(ep, EP_IRQENB));
1548
1549	/* device-to-host transfer completed */
1550	if (ep->is_in) {
1551		/* validate a short packet or zlp if necessary */
1552		if ((req->req.length % ep->ep.maxpacket != 0) ||
1553				req->req.zero)
1554			set_fifo_bytecount(ep, 0);
1555
1556		net2272_done(ep, req, 0);
1557		if (!list_empty(&ep->queue)) {
1558			req = list_entry(ep->queue.next,
1559					struct net2272_request, queue);
1560			status = net2272_kick_dma(ep, req);
1561			if (status < 0)
1562				net2272_pio_advance(ep);
1563		}
1564
1565	/* host-to-device transfer completed */
1566	} else {
1567		/* terminated with a short packet? */
1568		if (net2272_read(ep->dev, IRQSTAT0) &
1569				(1 << DMA_DONE_INTERRUPT)) {
1570			/* abort system dma */
1571			net2272_cancel_dma(ep->dev);
1572		}
1573
1574		/* EP_TRANSFER will contain the number of bytes
1575		 * actually received.
1576		 * NOTE: There is no overflow detection on EP_TRANSFER:
1577		 * We can't deal with transfers larger than 2^24 bytes!
1578		 */
1579		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1580			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1581			| (net2272_ep_read(ep, EP_TRANSFER0));
1582
1583		if (ep->not_empty)
1584			len += 4;
1585
1586		req->req.actual += len;
1587
1588		/* get any remaining data */
1589		net2272_pio_advance(ep);
1590	}
1591}
1592
1593/*---------------------------------------------------------------------------*/
1594
1595static void
1596net2272_handle_ep(struct net2272_ep *ep)
1597{
1598	struct net2272_request *req;
1599	u8 stat0, stat1;
1600
1601	if (!list_empty(&ep->queue))
1602		req = list_entry(ep->queue.next,
1603			struct net2272_request, queue);
1604	else
1605		req = NULL;
1606
1607	/* ack all, and handle what we care about */
1608	stat0 = net2272_ep_read(ep, EP_STAT0);
1609	stat1 = net2272_ep_read(ep, EP_STAT1);
1610	ep->irqs++;
1611
1612	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1613		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1614
1615	net2272_ep_write(ep, EP_STAT0, stat0 &
1616		~((1 << NAK_OUT_PACKETS)
1617		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1618	net2272_ep_write(ep, EP_STAT1, stat1);
1619
1620	/* data packet(s) received (in the fifo, OUT)
1621	 * direction must be validated, otherwise control read status phase
1622	 * could be interpreted as a valid packet
1623	 */
1624	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1625		net2272_pio_advance(ep);
1626	/* data packet(s) transmitted (IN) */
1627	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1628		net2272_pio_advance(ep);
1629}
1630
1631static struct net2272_ep *
1632net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1633{
1634	struct net2272_ep *ep;
1635
1636	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1637		return &dev->ep[0];
1638
1639	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1640		u8 bEndpointAddress;
1641
1642		if (!ep->desc)
1643			continue;
1644		bEndpointAddress = ep->desc->bEndpointAddress;
1645		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1646			continue;
1647		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1648			return ep;
1649	}
1650	return NULL;
1651}
1652
1653/*
1654 * USB Test Packet:
1655 * JKJKJKJK * 9
1656 * JJKKJJKK * 8
1657 * JJJJKKKK * 8
1658 * JJJJJJJKKKKKKK * 8
1659 * JJJJJJJK * 8
1660 * {JKKKKKKK * 10}, JK
1661 */
1662static const u8 net2272_test_packet[] = {
1663	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1664	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1665	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1666	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1667	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1668	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1669};
1670
1671static void
1672net2272_set_test_mode(struct net2272 *dev, int mode)
1673{
1674	int i;
1675
1676	/* Disable all net2272 interrupts:
1677	 * Nothing but a power cycle should stop the test.
1678	 */
1679	net2272_write(dev, IRQENB0, 0x00);
1680	net2272_write(dev, IRQENB1, 0x00);
1681
1682	/* Force transceiver to high-speed */
1683	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1684
1685	net2272_write(dev, PAGESEL, 0);
1686	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1687	net2272_write(dev, EP_RSPCLR,
1688			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1689			| (1 << HIDE_STATUS_PHASE));
1690	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1691	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1692
1693	/* wait for status phase to complete */
1694	while (!(net2272_read(dev, EP_STAT0) &
1695				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1696		;
1697
1698	/* Enable test mode */
1699	net2272_write(dev, USBTEST, mode);
1700
1701	/* load test packet */
1702	if (mode == USB_TEST_PACKET) {
1703		/* switch to 8 bit mode */
1704		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1705				~(1 << DATA_WIDTH));
1706
1707		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1708			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1709
1710		/* Validate test packet */
1711		net2272_write(dev, EP_TRANSFER0, 0);
1712	}
1713}
1714
1715static void
1716net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1717{
1718	struct net2272_ep *ep;
1719	u8 num, scratch;
1720
1721	/* starting a control request? */
1722	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1723		union {
1724			u8 raw[8];
1725			struct usb_ctrlrequest	r;
1726		} u;
1727		int tmp = 0;
1728		struct net2272_request *req;
1729
1730		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1731			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1732				dev->gadget.speed = USB_SPEED_HIGH;
1733			else
1734				dev->gadget.speed = USB_SPEED_FULL;
1735			dev_dbg(dev->dev, "%s\n",
1736				usb_speed_string(dev->gadget.speed));
1737		}
1738
1739		ep = &dev->ep[0];
1740		ep->irqs++;
1741
1742		/* make sure any leftover interrupt state is cleared */
1743		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1744		while (!list_empty(&ep->queue)) {
1745			req = list_entry(ep->queue.next,
1746				struct net2272_request, queue);
1747			net2272_done(ep, req,
1748				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1749		}
1750		ep->stopped = 0;
1751		dev->protocol_stall = 0;
1752		net2272_ep_write(ep, EP_STAT0,
1753			    (1 << DATA_IN_TOKEN_INTERRUPT)
1754			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1755			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1756			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1757			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1758		net2272_ep_write(ep, EP_STAT1,
1759			    (1 << TIMEOUT)
1760			  | (1 << USB_OUT_ACK_SENT)
1761			  | (1 << USB_OUT_NAK_SENT)
1762			  | (1 << USB_IN_ACK_RCVD)
1763			  | (1 << USB_IN_NAK_SENT)
1764			  | (1 << USB_STALL_SENT)
1765			  | (1 << LOCAL_OUT_ZLP));
1766
1767		/*
1768		 * Ensure Control Read pre-validation setting is beyond maximum size
1769		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1770		 *    an EP0 transfer following the Control Write is a Control Read,
1771		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1772		 *    pre-validation count.
1773		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1774		 *    the pre-validation count cannot cause an unexpected validation
1775		 */
1776		net2272_write(dev, PAGESEL, 0);
1777		net2272_write(dev, EP_TRANSFER2, 0xff);
1778		net2272_write(dev, EP_TRANSFER1, 0xff);
1779		net2272_write(dev, EP_TRANSFER0, 0xff);
1780
1781		u.raw[0] = net2272_read(dev, SETUP0);
1782		u.raw[1] = net2272_read(dev, SETUP1);
1783		u.raw[2] = net2272_read(dev, SETUP2);
1784		u.raw[3] = net2272_read(dev, SETUP3);
1785		u.raw[4] = net2272_read(dev, SETUP4);
1786		u.raw[5] = net2272_read(dev, SETUP5);
1787		u.raw[6] = net2272_read(dev, SETUP6);
1788		u.raw[7] = net2272_read(dev, SETUP7);
1789		/*
1790		 * If you have a big-endian cpu, make sure le16_to_cpus
1791		 * performs the proper byte swapping here...
1792		 */
1793		le16_to_cpus(&u.r.wValue);
1794		le16_to_cpus(&u.r.wIndex);
1795		le16_to_cpus(&u.r.wLength);
1796
1797		/* ack the irq */
1798		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1799		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1800
1801		/* watch control traffic at the token level, and force
1802		 * synchronization before letting the status phase happen.
1803		 */
1804		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1805		if (ep->is_in) {
1806			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1807				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1808				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1809			stop_out_naking(ep);
1810		} else
1811			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1812				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814		net2272_ep_write(ep, EP_IRQENB, scratch);
1815
1816		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1817			goto delegate;
1818		switch (u.r.bRequest) {
1819		case USB_REQ_GET_STATUS: {
1820			struct net2272_ep *e;
1821			u16 status = 0;
1822
1823			switch (u.r.bRequestType & USB_RECIP_MASK) {
1824			case USB_RECIP_ENDPOINT:
1825				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1826				if (!e || u.r.wLength > 2)
1827					goto do_stall;
1828				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1829					status = cpu_to_le16(1);
1830				else
1831					status = cpu_to_le16(0);
1832
1833				/* don't bother with a request object! */
1834				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1835				writew(status, net2272_reg_addr(dev, EP_DATA));
1836				set_fifo_bytecount(&dev->ep[0], 0);
1837				allow_status(ep);
1838				dev_vdbg(dev->dev, "%s stat %02x\n",
1839					ep->ep.name, status);
1840				goto next_endpoints;
1841			case USB_RECIP_DEVICE:
1842				if (u.r.wLength > 2)
1843					goto do_stall;
1844				if (dev->gadget.is_selfpowered)
1845					status = (1 << USB_DEVICE_SELF_POWERED);
1846
1847				/* don't bother with a request object! */
1848				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849				writew(status, net2272_reg_addr(dev, EP_DATA));
1850				set_fifo_bytecount(&dev->ep[0], 0);
1851				allow_status(ep);
1852				dev_vdbg(dev->dev, "device stat %02x\n", status);
1853				goto next_endpoints;
1854			case USB_RECIP_INTERFACE:
1855				if (u.r.wLength > 2)
1856					goto do_stall;
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "interface status %02x\n", status);
1864				goto next_endpoints;
1865			}
1866
1867			break;
1868		}
1869		case USB_REQ_CLEAR_FEATURE: {
1870			struct net2272_ep *e;
1871
1872			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1873				goto delegate;
1874			if (u.r.wValue != USB_ENDPOINT_HALT ||
1875			    u.r.wLength != 0)
1876				goto do_stall;
1877			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1878			if (!e)
1879				goto do_stall;
1880			if (e->wedged) {
1881				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1882					ep->ep.name);
1883			} else {
1884				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1885				clear_halt(e);
1886			}
1887			allow_status(ep);
1888			goto next_endpoints;
1889		}
1890		case USB_REQ_SET_FEATURE: {
1891			struct net2272_ep *e;
1892
1893			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1894				if (u.r.wIndex != NORMAL_OPERATION)
1895					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1896				allow_status(ep);
1897				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1898				goto next_endpoints;
1899			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1900				goto delegate;
1901			if (u.r.wValue != USB_ENDPOINT_HALT ||
1902			    u.r.wLength != 0)
1903				goto do_stall;
1904			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1905			if (!e)
1906				goto do_stall;
1907			set_halt(e);
1908			allow_status(ep);
1909			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1910			goto next_endpoints;
1911		}
1912		case USB_REQ_SET_ADDRESS: {
1913			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1914			allow_status(ep);
1915			break;
1916		}
1917		default:
1918 delegate:
1919			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1920				"ep_cfg %08x\n",
1921				u.r.bRequestType, u.r.bRequest,
1922				u.r.wValue, u.r.wIndex,
1923				net2272_ep_read(ep, EP_CFG));
1924			if (dev->async_callbacks) {
1925				spin_unlock(&dev->lock);
1926				tmp = dev->driver->setup(&dev->gadget, &u.r);
1927				spin_lock(&dev->lock);
1928			}
1929		}
1930
1931		/* stall ep0 on error */
1932		if (tmp < 0) {
1933 do_stall:
1934			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1935				u.r.bRequestType, u.r.bRequest, tmp);
1936			dev->protocol_stall = 1;
1937		}
1938	/* endpoint dma irq? */
1939	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1940		net2272_cancel_dma(dev);
1941		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1942		stat &= ~(1 << DMA_DONE_INTERRUPT);
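		/* DMA_ENDPOINT_SELECT in DMAREQ identifies which endpoint the completed transfer belongs to: set = ep-b (dev->ep[2]), clear = ep-a (dev->ep[1]) */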
1943		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1944			? 2 : 1;
1945
1946		ep = &dev->ep[num];
1947		net2272_handle_dma(ep);
1948	}
1949
1950 next_endpoints:
1951	/* endpoint data irq? */
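	/* bits 0-3 of irqstat0 are per-endpoint data interrupts; bit n maps to dev->ep[n] (ep0, ep-a, ep-b, ep-c) */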
1952	scratch = stat & 0x0f;
1953	stat &= ~0x0f;
1954	for (num = 0; scratch; num++) {
1955		u8 t;
1956
1957		/* does this endpoint's FIFO and queue need tending? */
1958		t = 1 << num;
1959		if ((scratch & t) == 0)
1960			continue;
1961		scratch ^= t;
1962
1963		ep = &dev->ep[num];
1964		net2272_handle_ep(ep);
1965	}
1966
1967	/* some interrupts we can just ignore */
1968	stat &= ~(1 << SOF_INTERRUPT);
1969
1970	if (stat)
1971		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1972}
1973
1974static void
1975net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1976{
1977	u8 tmp, mask;
1978
1979	/* after disconnect there's nothing else to do! */
1980	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1981	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1982
1983	if (stat & tmp) {
1984		bool	reset = false;
1985		bool	disconnect = false;
1986
1987		/*
1988		 * Ignore disconnects and resets if the speed hasn't been set.
1989		 * VBUS can bounce and there's always an initial reset.
1990		 */
1991		net2272_write(dev, IRQSTAT1, tmp);
1992		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1993			if ((stat & (1 << VBUS_INTERRUPT)) &&
1994					(net2272_read(dev, USBCTL1) &
1995						(1 << VBUS_PIN)) == 0) {
1996				disconnect = true;
1997				dev_dbg(dev->dev, "disconnect %s\n",
1998					dev->driver->driver.name);
1999			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2000					(net2272_read(dev, USBCTL1) & mask)
2001						== 0) {
2002				reset = true;
2003				dev_dbg(dev->dev, "reset %s\n",
2004					dev->driver->driver.name);
2005			}
2006
2007			if (disconnect || reset) {
2008				stop_activity(dev, dev->driver);
2009				net2272_ep0_start(dev);
2010				if (dev->async_callbacks) {
2011					spin_unlock(&dev->lock);
2012					if (reset)
2013						usb_gadget_udc_reset(&dev->gadget, dev->driver);
2014					else
2015						(dev->driver->disconnect)(&dev->gadget);
2016					spin_lock(&dev->lock);
2017				}
2018				return;
2019			}
2020		}
2021		stat &= ~tmp;
2022
2023		if (!stat)
2024			return;
2025	}
2026
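	/* the "change" irq fires on both suspend entry and resume; the SUSPEND_REQUEST_INTERRUPT status bit distinguishes the two */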
2027	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2028	if (stat & tmp) {
2029		net2272_write(dev, IRQSTAT1, tmp);
2030		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2031			if (dev->async_callbacks && dev->driver->suspend)
2032				dev->driver->suspend(&dev->gadget);
2033			if (!enable_suspend) {
2034				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2035				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2036			}
2037		} else {
2038			if (dev->async_callbacks && dev->driver->resume)
2039				dev->driver->resume(&dev->gadget);
2040		}
2041		stat &= ~tmp;
2042	}
2043
2044	/* clear any other status/irqs */
2045	if (stat)
2046		net2272_write(dev, IRQSTAT1, stat);
2047
2048	/* some status we can just ignore */
2049	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2050			| (1 << SUSPEND_REQUEST_INTERRUPT)
2051			| (1 << RESUME_INTERRUPT));
2052	if (!stat)
2053		return;
2054	else
2055		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2056}
2057
2058static irqreturn_t net2272_irq(int irq, void *_dev)
2059{
2060	struct net2272 *dev = _dev;
2061#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2062	u32 intcsr;
2063#endif
2064#if defined(PLX_PCI_RDK)
2065	u8 dmareq;
2066#endif
2067	spin_lock(&dev->lock);
2068#if defined(PLX_PCI_RDK)
2069	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2070
2071	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2072		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2073				dev->rdk1.plx9054_base_addr + INTCSR);
2074		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2075		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2076		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2077		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2078			dev->rdk1.plx9054_base_addr + INTCSR);
2079	}
2080	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2081		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2082				dev->rdk1.plx9054_base_addr + DMACSR0);
2083
2084		dmareq = net2272_read(dev, DMAREQ);
2085		if (dmareq & 0x01)
2086			net2272_handle_dma(&dev->ep[2]);
2087		else
2088			net2272_handle_dma(&dev->ep[1]);
2089	}
2090#endif
2091#if defined(PLX_PCI_RDK2)
2092	/* see if PCI int for us by checking irqstat */
2093	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2094	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2095		spin_unlock(&dev->lock);
2096		return IRQ_NONE;
2097	}
2098	/* check dma interrupts */
2099#endif
2100	/* Platform/device interrupt handler */
2101#if !defined(PLX_PCI_RDK)
2102	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2103	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2104#endif
2105	spin_unlock(&dev->lock);
2106
2107	return IRQ_HANDLED;
2108}
2109
2110static int net2272_present(struct net2272 *dev)
2111{
2112	/*
2113	 * Quick test to see if CPU can communicate properly with the NET2272.
2114	 * Verifies connection using writes and reads to write/read and
2115	 * read-only registers.
2116	 *
2117	 * This routine is strongly recommended, especially during early bring-up
2118	 * of new hardware; however, for designs that do not apply Power On System
2119	 * Tests (POST) it may be discarded (or perhaps minimized).
2120	 */
2121	unsigned int ii;
2122	u8 val, refval;
2123
2124	/* Verify NET2272 write/read SCRATCH register can write and read */
2125	refval = net2272_read(dev, SCRATCH);
2126	for (ii = 0; ii < 0x100; ii += 7) {
2127		net2272_write(dev, SCRATCH, ii);
2128		val = net2272_read(dev, SCRATCH);
2129		if (val != ii) {
2130			dev_dbg(dev->dev,
2131				"%s: write/read SCRATCH register test failed: "
2132				"wrote:0x%2.2x, read:0x%2.2x\n",
2133				__func__, ii, val);
2134			return -EINVAL;
2135		}
2136	}
2137	/* To be nice, we write the original SCRATCH value back: */
2138	net2272_write(dev, SCRATCH, refval);
2139
2140	/* Verify NET2272 CHIPREV register is read-only: */
2141	refval = net2272_read(dev, CHIPREV_2272);
2142	for (ii = 0; ii < 0x100; ii += 7) {
2143		net2272_write(dev, CHIPREV_2272, ii);
2144		val = net2272_read(dev, CHIPREV_2272);
2145		if (val != refval) {
2146			dev_dbg(dev->dev,
2147				"%s: write/read CHIPREV register test failed: "
2148				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2149				__func__, ii, val, refval);
2150			return -EINVAL;
2151		}
2152	}
2153
2154	/*
2155	 * Verify NET2272's "NET2270 legacy revision" register
2156	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2157	 *    register should read the same value, regardless of the NET2272
2158	 *    silicon revision.  The legacy register applies to NET2270
2159	 *    firmware being applied to the NET2272.
2160	 */
2161	val = net2272_read(dev, CHIPREV_LEGACY);
2162	if (val != NET2270_LEGACY_REV) {
2163		/*
2164		 * Unexpected legacy revision value
2165		 * - Perhaps the chip is a NET2270?
2166		 */
2167		dev_dbg(dev->dev,
2168			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2169			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2170			__func__, NET2270_LEGACY_REV, val);
2171		return -EINVAL;
2172	}
2173
2174	/*
2175	 * Verify NET2272 silicon revision
2176	 *  - Unlike the legacy register above, this register reports the
2177	 *    revision of the NET2272 silicon itself
2178	 */
2179	val = net2272_read(dev, CHIPREV_2272);
2180	switch (val) {
2181	case CHIPREV_NET2272_R1:
2182		/*
2183		 * NET2272 Rev 1 has DMA related errata:
2184		 *  - Newer silicon (Rev 1A or better) required
2185		 */
2186		dev_dbg(dev->dev,
2187			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2188			__func__);
2189		break;
2190	case CHIPREV_NET2272_R1A:
2191		break;
2192	default:
2193		/* NET2272 silicon version *may* not work with this firmware */
2194		dev_dbg(dev->dev,
2195			"%s: unexpected silicon revision register value: "
2196			" CHIPREV_2272: 0x%2.2x\n",
2197			__func__, val);
2198		/*
2199		 * Return Success, even though the chip rev is not an expected value
2200		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2201		 *  - Often, new silicon is perfectly compatible
2202		 */
2203	}
2204
2205	/* Success: NET2272 checks out OK */
2206	return 0;
2207}
2208
2209static void
2210net2272_gadget_release(struct device *_dev)
2211{
2212	struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
2213
2214	kfree(dev);
2215}
2216
2217/*---------------------------------------------------------------------------*/
2218
2219static void
2220net2272_remove(struct net2272 *dev)
2221{
2222	if (dev->added)
2223		usb_del_gadget(&dev->gadget);
2224	free_irq(dev->irq, dev);
2225	iounmap(dev->base_addr);
2226	device_remove_file(dev->dev, &dev_attr_registers);
2227
2228	dev_info(dev->dev, "unbind\n");
2229}
2230
2231static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2232{
2233	struct net2272 *ret;
2234
2235	if (!irq) {
2236		dev_dbg(dev, "No IRQ!\n");
2237		return ERR_PTR(-ENODEV);
2238	}
2239
2240	/* alloc, and start init */
2241	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2242	if (!ret)
2243		return ERR_PTR(-ENOMEM);
2244
2245	spin_lock_init(&ret->lock);
2246	ret->irq = irq;
2247	ret->dev = dev;
2248	ret->gadget.ops = &net2272_ops;
2249	ret->gadget.max_speed = USB_SPEED_HIGH;
2250
2251	/* the "gadget" abstracts/virtualizes the controller */
2252	ret->gadget.name = driver_name;
2253	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
2254
2255	return ret;
2256}
2257
2258static int
2259net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2260{
2261	int ret;
2262
2263	/* See if there... */
2264	if (net2272_present(dev)) {
2265		dev_warn(dev->dev, "2272 not found!\n");
2266		ret = -ENODEV;
2267		goto err;
2268	}
2269
2270	net2272_usb_reset(dev);
2271	net2272_usb_reinit(dev);
2272
2273	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2274	if (ret) {
2275		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2276		goto err;
2277	}
2278
2279	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2280
2281	/* done */
2282	dev_info(dev->dev, "%s\n", driver_desc);
2283	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2284		dev->irq, dev->base_addr, dev->chiprev,
2285		dma_mode_string());
2286	dev_info(dev->dev, "version: %s\n", driver_vers);
2287
2288	ret = device_create_file(dev->dev, &dev_attr_registers);
2289	if (ret)
2290		goto err_irq;
2291
2292	ret = usb_add_gadget(&dev->gadget);
2293	if (ret)
2294		goto err_add_udc;
2295	dev->added = 1;
2296
2297	return 0;
2298
2299err_add_udc:
2300	device_remove_file(dev->dev, &dev_attr_registers);
2301 err_irq:
2302	free_irq(dev->irq, dev);
2303 err:
2304	return ret;
2305}
2306
2307#ifdef CONFIG_USB_PCI
2308
2309/*
2310 * wrap this driver around the specified device, but
2311 * don't respond over USB until a gadget driver binds to us
2312 */
2313
2314static int
2315net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2316{
2317	unsigned long resource, len, tmp;
2318	void __iomem *mem_mapped_addr[4];
2319	int ret, i;
2320
2321	/*
2322	 * BAR 0 holds PLX 9054 config registers
2323	 * BAR 1 is i/o memory; unused here
2324	 * BAR 2 holds EPLD config registers
2325	 * BAR 3 holds NET2272 registers
2326	 */
2327
2328	/* Find and map all address spaces */
2329	for (i = 0; i < 4; ++i) {
2330		if (i == 1)
2331			continue;	/* BAR1 unused */
2332
2333		resource = pci_resource_start(pdev, i);
2334		len = pci_resource_len(pdev, i);
2335
2336		if (!request_mem_region(resource, len, driver_name)) {
2337			dev_dbg(dev->dev, "controller already in use\n");
2338			ret = -EBUSY;
2339			goto err;
2340		}
2341
2342		mem_mapped_addr[i] = ioremap(resource, len);
2343		if (mem_mapped_addr[i] == NULL) {
2344			release_mem_region(resource, len);
2345			dev_dbg(dev->dev, "can't map memory\n");
2346			ret = -EFAULT;
2347			goto err;
2348		}
2349	}
2350
2351	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2352	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2353	dev->base_addr = mem_mapped_addr[3];
2354
2355	/* Set PLX 9054 bus width (16 bits) */
2356	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2357	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2358			dev->rdk1.plx9054_base_addr + LBRD1);
2359
2360	/* Enable PLX 9054 Interrupts */
2361	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2362			(1 << PCI_INTERRUPT_ENABLE) |
2363			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2364			dev->rdk1.plx9054_base_addr + INTCSR);
2365
2366	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2367			dev->rdk1.plx9054_base_addr + DMACSR0);
2368
2369	/* configure the EPLD and assert NET2272 reset (released below) */
2370	writeb((1 << EPLD_DMA_ENABLE) |
2371		(1 << DMA_CTL_DACK) |
2372		(1 << DMA_TIMEOUT_ENABLE) |
2373		(1 << USER) |
2374		(0 << MPX_MODE) |
2375		(1 << BUSWIDTH) |
2376		(1 << NET2272_RESET),
2377		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2378
2379	mb();
2380	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2381		~(1 << NET2272_RESET),
2382		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2383	udelay(200);
2384
2385	return 0;
2386
2387 err:
2388	while (--i >= 0) {
2389		if (i == 1)
2390			continue;	/* BAR1 unused */
2391		iounmap(mem_mapped_addr[i]);
2392		release_mem_region(pci_resource_start(pdev, i),
2393			pci_resource_len(pdev, i));
2394	}
2395
2396	return ret;
2397}
2398
2399static int
2400net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2401{
2402	unsigned long resource, len;
2403	void __iomem *mem_mapped_addr[2];
2404	int ret, i;
2405
2406	/*
2407	 * BAR 0 holds FPGA config registers
2408	 * BAR 1 holds NET2272 registers
2409	 */
2410
2411	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2412	for (i = 0; i < 2; ++i) {
2413		resource = pci_resource_start(pdev, i);
2414		len = pci_resource_len(pdev, i);
2415
2416		if (!request_mem_region(resource, len, driver_name)) {
2417			dev_dbg(dev->dev, "controller already in use\n");
2418			ret = -EBUSY;
2419			goto err;
2420		}
2421
2422		mem_mapped_addr[i] = ioremap(resource, len);
2423		if (mem_mapped_addr[i] == NULL) {
2424			release_mem_region(resource, len);
2425			dev_dbg(dev->dev, "can't map memory\n");
2426			ret = -EFAULT;
2427			goto err;
2428		}
2429	}
2430
2431	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2432	dev->base_addr = mem_mapped_addr[1];
2433
2434	mb();
2435	/* Set 2272 bus width (16 bits) and reset */
2436	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2437	udelay(200);
2438	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2439	/* Print fpga version number */
2440	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2441		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2442	/* Enable FPGA Interrupts */
2443	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2444
2445	return 0;
2446
2447 err:
2448	while (--i >= 0) {
2449		iounmap(mem_mapped_addr[i]);
2450		release_mem_region(pci_resource_start(pdev, i),
2451			pci_resource_len(pdev, i));
2452	}
2453
2454	return ret;
2455}
2456
2457static int
2458net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2459{
2460	struct net2272 *dev;
2461	int ret;
2462
2463	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2464	if (IS_ERR(dev))
2465		return PTR_ERR(dev);
2466	dev->dev_id = pdev->device;
2467
2468	if (pci_enable_device(pdev) < 0) {
2469		ret = -ENODEV;
2470		goto err_put;
2471	}
2472
2473	pci_set_master(pdev);
2474
2475	switch (pdev->device) {
2476	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2477	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2478	default: BUG();
2479	}
2480	if (ret)
2481		goto err_pci;
2482
2483	ret = net2272_probe_fin(dev, 0);
2484	if (ret)
2485		goto err_pci;
2486
2487	pci_set_drvdata(pdev, dev);
2488
2489	return 0;
2490
2491 err_pci:
2492	pci_disable_device(pdev);
2493 err_put:
2494	usb_put_gadget(&dev->gadget);
2495
2496	return ret;
2497}
2498
2499static void
2500net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2501{
2502	int i;
2503
2504	/* disable PLX 9054 interrupts */
2505	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2506		~(1 << PCI_INTERRUPT_ENABLE),
2507		dev->rdk1.plx9054_base_addr + INTCSR);
2508
2509	/* clean up resources allocated during probe() */
2510	iounmap(dev->rdk1.plx9054_base_addr);
2511	iounmap(dev->rdk1.epld_base_addr);
2512
2513	for (i = 0; i < 4; ++i) {
2514		if (i == 1)
2515			continue;	/* BAR1 unused */
2516		release_mem_region(pci_resource_start(pdev, i),
2517			pci_resource_len(pdev, i));
2518	}
2519}
2520
2521static void
2522net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2523{
2524	int i;
2525
2526	/* disable fpga interrupts
2527	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2528			~(1 << PCI_INTERRUPT_ENABLE),
2529			dev->rdk1.plx9054_base_addr + INTCSR);
2530	*/
2531
2532	/* clean up resources allocated during probe() */
2533	iounmap(dev->rdk2.fpga_base_addr);
2534
2535	for (i = 0; i < 2; ++i)
2536		release_mem_region(pci_resource_start(pdev, i),
2537			pci_resource_len(pdev, i));
2538}
2539
2540static void
2541net2272_pci_remove(struct pci_dev *pdev)
2542{
2543	struct net2272 *dev = pci_get_drvdata(pdev);
2544
2545	net2272_remove(dev);
2546
2547	switch (pdev->device) {
2548	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2549	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2550	default: BUG();
2551	}
2552
2553	pci_disable_device(pdev);
2554
2555	usb_put_gadget(&dev->gadget);
2556}
2557
2558/* Table of matching PCI IDs */
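/* class_mask is zero below, so the class value is not used for matching; devices are matched by vendor/device ID alone */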
2559static struct pci_device_id pci_ids[] = {
2560	{	/* RDK 1 card */
2561		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562		.class_mask  = 0,
2563		.vendor      = PCI_VENDOR_ID_PLX,
2564		.device      = PCI_DEVICE_ID_RDK1,
2565		.subvendor   = PCI_ANY_ID,
2566		.subdevice   = PCI_ANY_ID,
2567	},
2568	{	/* RDK 2 card */
2569		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2570		.class_mask  = 0,
2571		.vendor      = PCI_VENDOR_ID_PLX,
2572		.device      = PCI_DEVICE_ID_RDK2,
2573		.subvendor   = PCI_ANY_ID,
2574		.subdevice   = PCI_ANY_ID,
2575	},
2576	{ }
2577};
2578MODULE_DEVICE_TABLE(pci, pci_ids);
2579
2580static struct pci_driver net2272_pci_driver = {
2581	.name     = driver_name,
2582	.id_table = pci_ids,
2583
2584	.probe    = net2272_pci_probe,
2585	.remove   = net2272_pci_remove,
2586};
2587
2588static int net2272_pci_register(void)
2589{
2590	return pci_register_driver(&net2272_pci_driver);
2591}
2592
2593static void net2272_pci_unregister(void)
2594{
2595	pci_unregister_driver(&net2272_pci_driver);
2596}
2597
2598#else
2599static inline int net2272_pci_register(void) { return 0; }
2600static inline void net2272_pci_unregister(void) { }
2601#endif
2602
2603/*---------------------------------------------------------------------------*/
2604
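/*
 * For reference, a minimal (hypothetical) board-file sketch that would bind
 * to this platform driver: the device name must be "net2272", and at least
 * one MEM and one IRQ resource are required (the optional BUS resource sets
 * base_shift).  The address, size, and IRQ number below are placeholders.
 *
 *	static struct resource board_net2272_resources[] = {
 *		DEFINE_RES_MEM(0x20300000, 0x100),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name		= "net2272",
 *		.id		= -1,
 *		.resource	= board_net2272_resources,
 *		.num_resources	= ARRAY_SIZE(board_net2272_resources),
 *	};
 *
 *	platform_device_register(&board_net2272_device);
 */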
2605static int
2606net2272_plat_probe(struct platform_device *pdev)
2607{
2608	struct net2272 *dev;
2609	int ret;
2610	unsigned int irqflags;
2611	resource_size_t base, len;
2612	struct resource *iomem, *iomem_bus, *irq_res;
2613
2614	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2615	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2617	if (!irq_res || !iomem) {
2618		dev_err(&pdev->dev, "must provide irq/base addr");
2619		return -EINVAL;
2620	}
2621
2622	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2623	if (IS_ERR(dev))
2624		return PTR_ERR(dev);
2625
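	/* translate the trigger type encoded in the IRQ resource flags into request_irq() trigger flags */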
2626	irqflags = 0;
2627	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2628		irqflags |= IRQF_TRIGGER_RISING;
2629	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2630		irqflags |= IRQF_TRIGGER_FALLING;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2632		irqflags |= IRQF_TRIGGER_HIGH;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2634		irqflags |= IRQF_TRIGGER_LOW;
2635
2636	base = iomem->start;
2637	len = resource_size(iomem);
2638	if (iomem_bus)
2639		dev->base_shift = iomem_bus->start;
2640
2641	if (!request_mem_region(base, len, driver_name)) {
2642		dev_dbg(dev->dev, "get request memory region!\n");
2643		ret = -EBUSY;
2644		goto err;
2645	}
2646	dev->base_addr = ioremap(base, len);
2647	if (!dev->base_addr) {
2648		dev_dbg(dev->dev, "can't map memory\n");
2649		ret = -EFAULT;
2650		goto err_req;
2651	}
2652
2653	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2654	if (ret)
2655		goto err_io;
2656
2657	platform_set_drvdata(pdev, dev);
2658	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2659		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2660
2661	return 0;
2662
2663 err_io:
2664	iounmap(dev->base_addr);
2665 err_req:
2666	release_mem_region(base, len);
2667 err:
2668	usb_put_gadget(&dev->gadget);
2669
2670	return ret;
2671}
2672
2673static void
2674net2272_plat_remove(struct platform_device *pdev)
2675{
2676	struct net2272 *dev = platform_get_drvdata(pdev);
2677
2678	net2272_remove(dev);
2679
2680	release_mem_region(pdev->resource[0].start,
2681		resource_size(&pdev->resource[0]));
2682
2683	usb_put_gadget(&dev->gadget);
2684}
2685
2686static struct platform_driver net2272_plat_driver = {
2687	.probe   = net2272_plat_probe,
2688	.remove_new = net2272_plat_remove,
2689	.driver  = {
2690		.name  = driver_name,
2691	},
2692	/* FIXME .suspend, .resume */
2693};
2694MODULE_ALIAS("platform:net2272");
2695
2696static int __init net2272_init(void)
2697{
2698	int ret;
2699
2700	ret = net2272_pci_register();
2701	if (ret)
2702		return ret;
2703	ret = platform_driver_register(&net2272_plat_driver);
2704	if (ret)
2705		goto err_pci;
2706	return ret;
2707
2708err_pci:
2709	net2272_pci_unregister();
2710	return ret;
2711}
2712module_init(net2272_init);
2713
2714static void __exit net2272_cleanup(void)
2715{
2716	net2272_pci_unregister();
2717	platform_driver_unregister(&net2272_plat_driver);
2718}
2719module_exit(net2272_cleanup);
2720
2721MODULE_DESCRIPTION(DRIVER_DESC);
2722MODULE_AUTHOR("PLX Technology, Inc.");
2723MODULE_LICENSE("GPL");
   1/*
   2 * Driver for PLX NET2272 USB device controller
   3 *
   4 * Copyright (C) 2005-2006 PLX Technology, Inc.
   5 * Copyright (C) 2006-2011 Analog Devices, Inc.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  20 */
  21
  22#include <linux/delay.h>
  23#include <linux/device.h>
  24#include <linux/errno.h>
  25#include <linux/gpio.h>
  26#include <linux/init.h>
  27#include <linux/interrupt.h>
  28#include <linux/io.h>
  29#include <linux/ioport.h>
  30#include <linux/kernel.h>
  31#include <linux/list.h>
  32#include <linux/module.h>
  33#include <linux/moduleparam.h>
  34#include <linux/pci.h>
  35#include <linux/platform_device.h>
  36#include <linux/prefetch.h>
  37#include <linux/sched.h>
  38#include <linux/slab.h>
  39#include <linux/timer.h>
  40#include <linux/usb.h>
  41#include <linux/usb/ch9.h>
  42#include <linux/usb/gadget.h>
  43
  44#include <asm/byteorder.h>
  45#include <asm/unaligned.h>
  46
  47#include "net2272.h"
  48
  49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
  50
  51static const char driver_name[] = "net2272";
  52static const char driver_vers[] = "2006 October 17/mainline";
  53static const char driver_desc[] = DRIVER_DESC;
  54
  55static const char ep0name[] = "ep0";
  56static const char * const ep_name[] = {
  57	ep0name,
  58	"ep-a", "ep-b", "ep-c",
  59};
  60
  61#ifdef CONFIG_USB_NET2272_DMA
  62/*
  63 * use_dma: the NET2272 can use an external DMA controller.
  64 * Note that since there is no generic DMA api, some functions,
  65 * notably request_dma, start_dma, and cancel_dma will need to be
  66 * modified for your platform's particular dma controller.
  67 *
  68 * If use_dma is disabled, pio will be used instead.
  69 */
  70static bool use_dma = 0;
  71module_param(use_dma, bool, 0644);
  72
  73/*
  74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
  75 * The NET2272 can only use dma for a single endpoint at a time.
  76 * At some point this could be modified to allow either endpoint
  77 * to take control of dma as it becomes available.
  78 *
  79 * Note that DMA should not be used on OUT endpoints unless it can
  80 * be guaranteed that no short packets will arrive on an IN endpoint
  81 * while the DMA operation is pending.  Otherwise the OUT DMA will
  82 * terminate prematurely (See NET2272 Errata 630-0213-0101)
  83 */
  84static ushort dma_ep = 1;
  85module_param(dma_ep, ushort, 0644);
  86
  87/*
  88 * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
  89 *	mode 0 == Slow DREQ mode
  90 *	mode 1 == Fast DREQ mode
  91 *	mode 2 == Burst mode
  92 */
  93static ushort dma_mode = 2;
  94module_param(dma_mode, ushort, 0644);
  95#else
  96#define use_dma 0
  97#define dma_ep 1
  98#define dma_mode 2
  99#endif
 100
 101/*
 102 * fifo_mode: net2272 buffer configuration:
 103 *      mode 0 == ep-{a,b,c} 512db each
 104 *      mode 1 == ep-a 1k, ep-{b,c} 512db
 105 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 106 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 107 */
 108static ushort fifo_mode = 0;
 109module_param(fifo_mode, ushort, 0644);
 110
 111/*
 112 * enable_suspend: When enabled, the driver will respond to
 113 * USB suspend requests by powering down the NET2272.  Otherwise,
 114 * USB suspend requests will be ignored.  This is acceptible for
 115 * self-powered devices.  For bus powered devices set this to 1.
 116 */
 117static ushort enable_suspend = 0;
 118module_param(enable_suspend, ushort, 0644);
 119
 120static void assert_out_naking(struct net2272_ep *ep, const char *where)
 121{
 122	u8 tmp;
 123
 124#ifndef DEBUG
 125	return;
 126#endif
 127
 128	tmp = net2272_ep_read(ep, EP_STAT0);
 129	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 130		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 131			ep->ep.name, where, tmp);
 132		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 133	}
 134}
 135#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 136
 137static void stop_out_naking(struct net2272_ep *ep)
 138{
 139	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 140
 141	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 142		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 143}
 144
 145#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 146
 147static char *type_string(u8 bmAttributes)
 148{
 149	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 150	case USB_ENDPOINT_XFER_BULK: return "bulk";
 151	case USB_ENDPOINT_XFER_ISOC: return "iso";
 152	case USB_ENDPOINT_XFER_INT:  return "intr";
 153	default:                     return "control";
 154	}
 155}
 156
 157static char *buf_state_string(unsigned state)
 158{
 159	switch (state) {
 160	case BUFF_FREE:  return "free";
 161	case BUFF_VALID: return "valid";
 162	case BUFF_LCL:   return "local";
 163	case BUFF_USB:   return "usb";
 164	default:         return "unknown";
 165	}
 166}
 167
 168static char *dma_mode_string(void)
 169{
 170	if (!use_dma)
 171		return "PIO";
 172	switch (dma_mode) {
 173	case 0:  return "SLOW DREQ";
 174	case 1:  return "FAST DREQ";
 175	case 2:  return "BURST";
 176	default: return "invalid";
 177	}
 178}
 179
 180static void net2272_dequeue_all(struct net2272_ep *);
 181static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 182static int net2272_fifo_status(struct usb_ep *);
 183
 184static struct usb_ep_ops net2272_ep_ops;
 185
 186/*---------------------------------------------------------------------------*/
 187
 188static int
 189net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 190{
 191	struct net2272 *dev;
 192	struct net2272_ep *ep;
 193	u32 max;
 194	u8 tmp;
 195	unsigned long flags;
 196
 197	ep = container_of(_ep, struct net2272_ep, ep);
 198	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 199			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 200		return -EINVAL;
 201	dev = ep->dev;
 202	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 203		return -ESHUTDOWN;
 204
 205	max = usb_endpoint_maxp(desc);
 206
 207	spin_lock_irqsave(&dev->lock, flags);
 208	_ep->maxpacket = max;
 209	ep->desc = desc;
 210
 211	/* net2272_ep_reset() has already been called */
 212	ep->stopped = 0;
 213	ep->wedged = 0;
 214
 215	/* set speed-dependent max packet */
 216	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 217	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 218
 219	/* set type, direction, address; reset fifo counters */
 220	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 221	tmp = usb_endpoint_type(desc);
 222	if (usb_endpoint_xfer_bulk(desc)) {
 223		/* catch some particularly blatant driver bugs */
 224		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 225		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 226			spin_unlock_irqrestore(&dev->lock, flags);
 227			return -ERANGE;
 228		}
 229	}
 230	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 231	tmp <<= ENDPOINT_TYPE;
 232	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 233	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 234	tmp |= (1 << ENDPOINT_ENABLE);
 235
 236	/* for OUT transfers, block the rx fifo until a read is posted */
 237	ep->is_in = usb_endpoint_dir_in(desc);
 238	if (!ep->is_in)
 239		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 240
 241	net2272_ep_write(ep, EP_CFG, tmp);
 242
 243	/* enable irqs */
 244	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 245	net2272_write(dev, IRQENB0, tmp);
 246
 247	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 248		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 249		| net2272_ep_read(ep, EP_IRQENB);
 250	net2272_ep_write(ep, EP_IRQENB, tmp);
 251
 252	tmp = desc->bEndpointAddress;
 253	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 254		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 255		type_string(desc->bmAttributes), max,
 256		net2272_ep_read(ep, EP_CFG));
 257
 258	spin_unlock_irqrestore(&dev->lock, flags);
 259	return 0;
 260}
 261
 262static void net2272_ep_reset(struct net2272_ep *ep)
 263{
 264	u8 tmp;
 265
 266	ep->desc = NULL;
 267	INIT_LIST_HEAD(&ep->queue);
 268
 269	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 270	ep->ep.ops = &net2272_ep_ops;
 271
 272	/* disable irqs, endpoint */
 273	net2272_ep_write(ep, EP_IRQENB, 0);
 274
 275	/* init to our chosen defaults, notably so that we NAK OUT
 276	 * packets until the driver queues a read.
 277	 */
 278	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 279	net2272_ep_write(ep, EP_RSPSET, tmp);
 280
 281	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 282	if (ep->num != 0)
 283		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 284
 285	net2272_ep_write(ep, EP_RSPCLR, tmp);
 286
 287	/* scrub most status bits, and flush any fifo state */
 288	net2272_ep_write(ep, EP_STAT0,
 289			  (1 << DATA_IN_TOKEN_INTERRUPT)
 290			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 291			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 292			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 293			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 294
 295	net2272_ep_write(ep, EP_STAT1,
 296			    (1 << TIMEOUT)
 297			  | (1 << USB_OUT_ACK_SENT)
 298			  | (1 << USB_OUT_NAK_SENT)
 299			  | (1 << USB_IN_ACK_RCVD)
 300			  | (1 << USB_IN_NAK_SENT)
 301			  | (1 << USB_STALL_SENT)
 302			  | (1 << LOCAL_OUT_ZLP)
 303			  | (1 << BUFFER_FLUSH));
 304
 305	/* fifo size is handled seperately */
 306}
 307
 308static int net2272_disable(struct usb_ep *_ep)
 309{
 310	struct net2272_ep *ep;
 311	unsigned long flags;
 312
 313	ep = container_of(_ep, struct net2272_ep, ep);
 314	if (!_ep || !ep->desc || _ep->name == ep0name)
 315		return -EINVAL;
 316
 317	spin_lock_irqsave(&ep->dev->lock, flags);
 318	net2272_dequeue_all(ep);
 319	net2272_ep_reset(ep);
 320
 321	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 322
 323	spin_unlock_irqrestore(&ep->dev->lock, flags);
 324	return 0;
 325}
 326
 327/*---------------------------------------------------------------------------*/
 328
 329static struct usb_request *
 330net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 331{
 332	struct net2272_request *req;
 333
 334	if (!_ep)
 335		return NULL;
 336
 337	req = kzalloc(sizeof(*req), gfp_flags);
 338	if (!req)
 339		return NULL;
 340
 341	INIT_LIST_HEAD(&req->queue);
 342
 343	return &req->req;
 344}
 345
 346static void
 347net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 348{
 349	struct net2272_request *req;
 350
 351	if (!_ep || !_req)
 352		return;
 353
 354	req = container_of(_req, struct net2272_request, req);
 355	WARN_ON(!list_empty(&req->queue));
 356	kfree(req);
 357}
 358
 359static void
 360net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 361{
 362	struct net2272 *dev;
 363	unsigned stopped = ep->stopped;
 364
 365	if (ep->num == 0) {
 366		if (ep->dev->protocol_stall) {
 367			ep->stopped = 1;
 368			set_halt(ep);
 369		}
 370		allow_status(ep);
 371	}
 372
 373	list_del_init(&req->queue);
 374
 375	if (req->req.status == -EINPROGRESS)
 376		req->req.status = status;
 377	else
 378		status = req->req.status;
 379
 380	dev = ep->dev;
 381	if (use_dma && ep->dma)
 382		usb_gadget_unmap_request(&dev->gadget, &req->req,
 383				ep->is_in);
 384
 385	if (status && status != -ESHUTDOWN)
 386		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 387			ep->ep.name, &req->req, status,
 388			req->req.actual, req->req.length, req->req.buf);
 389
 390	/* don't modify queue heads during completion callback */
 391	ep->stopped = 1;
 392	spin_unlock(&dev->lock);
 393	usb_gadget_giveback_request(&ep->ep, &req->req);
 394	spin_lock(&dev->lock);
 395	ep->stopped = stopped;
 396}
 397
 398static int
 399net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 400	struct net2272_request *req, unsigned max)
 401{
 402	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 403	u16 *bufp;
 404	unsigned length, count;
 405	u8 tmp;
 406
 407	length = min(req->req.length - req->req.actual, max);
 408	req->req.actual += length;
 409
 410	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 411		ep->ep.name, req, max, length,
 412		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 413
 414	count = length;
 415	bufp = (u16 *)buf;
 416
 417	while (likely(count >= 2)) {
 418		/* no byte-swap required; chip endian set during init */
 419		writew(*bufp++, ep_data);
 420		count -= 2;
 421	}
 422	buf = (u8 *)bufp;
 423
 424	/* write final byte by placing the NET2272 into 8-bit mode */
 425	if (unlikely(count)) {
 426		tmp = net2272_read(ep->dev, LOCCTL);
 427		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 428		writeb(*buf, ep_data);
 429		net2272_write(ep->dev, LOCCTL, tmp);
 430	}
 431	return length;
 432}
 433
 434/* returns: 0: still running, 1: completed, negative: errno */
 435static int
 436net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 437{
 438	u8 *buf;
 439	unsigned count, max;
 440	int status;
 441
 442	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 443		ep->ep.name, req->req.actual, req->req.length);
 444
 445	/*
 446	 * Keep loading the endpoint until the final packet is loaded,
 447	 * or the endpoint buffer is full.
 448	 */
 449 top:
 450	/*
 451	 * Clear interrupt status
 452	 *  - Packet Transmitted interrupt will become set again when the
 453	 *    host successfully takes another packet
 454	 */
 455	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 456	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 457		buf = req->req.buf + req->req.actual;
 458		prefetch(buf);
 459
 460		/* force pagesel */
 461		net2272_ep_read(ep, EP_STAT0);
 462
 463		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 464			(net2272_ep_read(ep, EP_AVAIL0));
 465
 466		if (max < ep->ep.maxpacket)
 467			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 468				| (net2272_ep_read(ep, EP_AVAIL0));
 469
 470		count = net2272_write_packet(ep, buf, req, max);
 471		/* see if we are done */
 472		if (req->req.length == req->req.actual) {
 473			/* validate short or zlp packet */
 474			if (count < ep->ep.maxpacket)
 475				set_fifo_bytecount(ep, 0);
 476			net2272_done(ep, req, 0);
 477
 478			if (!list_empty(&ep->queue)) {
 479				req = list_entry(ep->queue.next,
 480						struct net2272_request,
 481						queue);
 482				status = net2272_kick_dma(ep, req);
 483
 484				if (status < 0)
 485					if ((net2272_ep_read(ep, EP_STAT0)
 486							& (1 << BUFFER_EMPTY)))
 487						goto top;
 488			}
 489			return 1;
 490		}
 491		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 492	}
 493	return 0;
 494}
 495
 496static void
 497net2272_out_flush(struct net2272_ep *ep)
 498{
 499	ASSERT_OUT_NAKING(ep);
 500
 501	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 502			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 503	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 504}
 505
 506static int
 507net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 508	struct net2272_request *req, unsigned avail)
 509{
 510	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 511	unsigned is_short;
 512	u16 *bufp;
 513
 514	req->req.actual += avail;
 515
 516	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 517		ep->ep.name, req, avail,
 518		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 519
 520	is_short = (avail < ep->ep.maxpacket);
 521
 522	if (unlikely(avail == 0)) {
 523		/* remove any zlp from the buffer */
 524		(void)readw(ep_data);
 525		return is_short;
 526	}
 527
 528	/* Ensure we get the final byte */
 529	if (unlikely(avail % 2))
 530		avail++;
 531	bufp = (u16 *)buf;
 532
 533	do {
 534		*bufp++ = readw(ep_data);
 535		avail -= 2;
 536	} while (avail);
 537
 538	/*
 539	 * To avoid false endpoint available race condition must read
 540	 * ep stat0 twice in the case of a short transfer
 541	 */
 542	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 543		net2272_ep_read(ep, EP_STAT0);
 544
 545	return is_short;
 546}
 547
 548static int
 549net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 550{
 551	u8 *buf;
 552	unsigned is_short;
 553	int count;
 554	int tmp;
 555	int cleanup = 0;
 556	int status = -1;
 557
 558	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 559		ep->ep.name, req->req.actual, req->req.length);
 560
 561 top:
 562	do {
 563		buf = req->req.buf + req->req.actual;
 564		prefetchw(buf);
 565
 566		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 567			| net2272_ep_read(ep, EP_AVAIL0);
 568
 569		net2272_ep_write(ep, EP_STAT0,
 570			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 571			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 572
 573		tmp = req->req.length - req->req.actual;
 574
 575		if (count > tmp) {
 576			if ((tmp % ep->ep.maxpacket) != 0) {
 577				dev_err(ep->dev->dev,
 578					"%s out fifo %d bytes, expected %d\n",
 579					ep->ep.name, count, tmp);
 580				cleanup = 1;
 581			}
 582			count = (tmp > 0) ? tmp : 0;
 583		}
 584
 585		is_short = net2272_read_packet(ep, buf, req, count);
 586
 587		/* completion */
 588		if (unlikely(cleanup || is_short ||
 589				((req->req.actual == req->req.length)
 590				 && !req->req.zero))) {
 591
 592			if (cleanup) {
 593				net2272_out_flush(ep);
 594				net2272_done(ep, req, -EOVERFLOW);
 595			} else
 596				net2272_done(ep, req, 0);
 597
 598			/* re-initialize endpoint transfer registers
 599			 * otherwise they may result in erroneous pre-validation
 600			 * for subsequent control reads
 601			 */
 602			if (unlikely(ep->num == 0)) {
 603				net2272_ep_write(ep, EP_TRANSFER2, 0);
 604				net2272_ep_write(ep, EP_TRANSFER1, 0);
 605				net2272_ep_write(ep, EP_TRANSFER0, 0);
 606			}
 607
 608			if (!list_empty(&ep->queue)) {
 
 
 609				req = list_entry(ep->queue.next,
 610					struct net2272_request, queue);
 611				status = net2272_kick_dma(ep, req);
 612				if ((status < 0) &&
 613				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 614					goto top;
 615			}
 616			return 1;
 617		}
 618	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 619
 620	return 0;
 621}
 622
 623static void
 624net2272_pio_advance(struct net2272_ep *ep)
 625{
 626	struct net2272_request *req;
 627
 628	if (unlikely(list_empty(&ep->queue)))
 629		return;
 630
 631	req = list_entry(ep->queue.next, struct net2272_request, queue);
 632	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 633}
 634
 635/* returns 0 on success, else negative errno */
 636static int
 637net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 638	unsigned len, unsigned dir)
 639{
 640	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 641		ep, buf, len, dir);
 642
 643	/* The NET2272 only supports a single dma channel */
 644	if (dev->dma_busy)
 645		return -EBUSY;
 646	/*
 647	 * EP_TRANSFER (used to determine the number of bytes received
 648	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 649	 */
 650	if ((dir == 1) && (len > 0x1000000))
 651		return -EINVAL;
 652
 653	dev->dma_busy = 1;
 654
 655	/* initialize platform's dma */
 656#ifdef CONFIG_PCI
 657	/* NET2272 addr, buffer addr, length, etc. */
 658	switch (dev->dev_id) {
 659	case PCI_DEVICE_ID_RDK1:
 660		/* Setup PLX 9054 DMA mode */
 661		writel((1 << LOCAL_BUS_WIDTH) |
 662			(1 << TA_READY_INPUT_ENABLE) |
 663			(0 << LOCAL_BURST_ENABLE) |
 664			(1 << DONE_INTERRUPT_ENABLE) |
 665			(1 << LOCAL_ADDRESSING_MODE) |
 666			(1 << DEMAND_MODE) |
 667			(1 << DMA_EOT_ENABLE) |
 668			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 669			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 670			dev->rdk1.plx9054_base_addr + DMAMODE0);
 671
 672		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 673		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 674		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 675		writel((dir << DIRECTION_OF_TRANSFER) |
 676			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 677			dev->rdk1.plx9054_base_addr + DMADPR0);
 678		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 679			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 680			dev->rdk1.plx9054_base_addr + INTCSR);
 681
 682		break;
 683	}
 684#endif
 685
 686	net2272_write(dev, DMAREQ,
 687		(0 << DMA_BUFFER_VALID) |
 688		(1 << DMA_REQUEST_ENABLE) |
 689		(1 << DMA_CONTROL_DACK) |
 690		(dev->dma_eot_polarity << EOT_POLARITY) |
 691		(dev->dma_dack_polarity << DACK_POLARITY) |
 692		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 693		((ep >> 1) << DMA_ENDPOINT_SELECT));
 694
 695	(void) net2272_read(dev, SCRATCH);
 696
 697	return 0;
 698}
 699
 700static void
 701net2272_start_dma(struct net2272 *dev)
 702{
 703	/* start platform's dma controller */
 704#ifdef CONFIG_PCI
 705	switch (dev->dev_id) {
 706	case PCI_DEVICE_ID_RDK1:
 707		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 708			dev->rdk1.plx9054_base_addr + DMACSR0);
 709		break;
 710	}
 711#endif
 712}
 713
 714/* returns 0 on success, else negative errno */
 715static int
 716net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 717{
 718	unsigned size;
 719	u8 tmp;
 720
 721	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 722		return -EINVAL;
 723
 724	/* don't use dma for odd-length transfers
 725	 * otherwise, we'd need to deal with the last byte with pio
 726	 */
 727	if (req->req.length & 1)
 728		return -EINVAL;
 729
 730	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 731		ep->ep.name, req, (unsigned long long) req->req.dma);
 732
 733	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 734
 735	/* The NET2272 can only use DMA on one endpoint at a time */
 736	if (ep->dev->dma_busy)
 737		return -EBUSY;
 738
 739	/* Make sure we only DMA an even number of bytes (we'll use
 740	 * pio to complete the transfer)
 741	 */
 742	size = req->req.length;
 743	size &= ~1;
 744
 745	/* device-to-host transfer */
 746	if (ep->is_in) {
 747		/* initialize platform's dma controller */
 748		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 749			/* unable to obtain DMA channel; return error and use pio mode */
 750			return -EBUSY;
 751		req->req.actual += size;
 752
 753	/* host-to-device transfer */
 754	} else {
 755		tmp = net2272_ep_read(ep, EP_STAT0);
 756
 757		/* initialize platform's dma controller */
 758		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 759			/* unable to obtain DMA channel; return error and use pio mode */
 760			return -EBUSY;
 761
 762		if (!(tmp & (1 << BUFFER_EMPTY)))
 763			ep->not_empty = 1;
 764		else
 765			ep->not_empty = 0;
 766
 767
 768		/* allow the endpoint's buffer to fill */
 769		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 770
 771		/* this transfer completed and data's already in the fifo
 772		 * return error so pio gets used.
 773		 */
 774		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 775
 776			/* deassert dreq */
 777			net2272_write(ep->dev, DMAREQ,
 778				(0 << DMA_BUFFER_VALID) |
 779				(0 << DMA_REQUEST_ENABLE) |
 780				(1 << DMA_CONTROL_DACK) |
 781				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 782				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 783				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 784				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 785
 786			return -EBUSY;
 787		}
 788	}
 789
 790	/* Don't use per-packet interrupts: use dma interrupts only */
 791	net2272_ep_write(ep, EP_IRQENB, 0);
 792
 793	net2272_start_dma(ep->dev);
 794
 795	return 0;
 796}
 797
 798static void net2272_cancel_dma(struct net2272 *dev)
 799{
 800#ifdef CONFIG_PCI
 801	switch (dev->dev_id) {
 802	case PCI_DEVICE_ID_RDK1:
 803		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 804		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 805		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 806		         (1 << CHANNEL_DONE)))
 807			continue;	/* wait for dma to stabalize */
 808
 809		/* dma abort generates an interrupt */
 810		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 811			dev->rdk1.plx9054_base_addr + DMACSR0);
 812		break;
 813	}
 814#endif
 815
 816	dev->dma_busy = 0;
 817}
 818
 819/*---------------------------------------------------------------------------*/
 820
 821static int
 822net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 823{
 824	struct net2272_request *req;
 825	struct net2272_ep *ep;
 826	struct net2272 *dev;
 827	unsigned long flags;
 828	int status = -1;
 829	u8 s;
 830
 831	req = container_of(_req, struct net2272_request, req);
 832	if (!_req || !_req->complete || !_req->buf
 833			|| !list_empty(&req->queue))
 834		return -EINVAL;
 835	ep = container_of(_ep, struct net2272_ep, ep);
 836	if (!_ep || (!ep->desc && ep->num != 0))
 837		return -EINVAL;
 838	dev = ep->dev;
 839	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 840		return -ESHUTDOWN;
 841
 842	/* set up dma mapping in case the caller didn't */
 843	if (use_dma && ep->dma) {
 844		status = usb_gadget_map_request(&dev->gadget, _req,
 845				ep->is_in);
 846		if (status)
 847			return status;
 848	}
 849
 850	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 851		_ep->name, _req, _req->length, _req->buf,
 852		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 853
 854	spin_lock_irqsave(&dev->lock, flags);
 855
 856	_req->status = -EINPROGRESS;
 857	_req->actual = 0;
 858
 859	/* kickstart this i/o queue? */
 860	if (list_empty(&ep->queue) && !ep->stopped) {
 861		/* maybe there's no control data, just status ack */
 862		if (ep->num == 0 && _req->length == 0) {
 863			net2272_done(ep, req, 0);
 864			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 865			goto done;
 866		}
 867
 868		/* Return zlp, don't let it block subsequent packets */
 869		s = net2272_ep_read(ep, EP_STAT0);
 870		if (s & (1 << BUFFER_EMPTY)) {
 871			/* Buffer is empty check for a blocking zlp, handle it */
 872			if ((s & (1 << NAK_OUT_PACKETS)) &&
 873			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 874				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 875				/*
 876				 * Request is going to terminate with a short packet ...
 877				 * hope the client is ready for it!
 878				 */
 879				status = net2272_read_fifo(ep, req);
 880				/* clear short packet naking */
 881				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 882				goto done;
 883			}
 884		}
 885
 886		/* try dma first */
 887		status = net2272_kick_dma(ep, req);
 888
 889		if (status < 0) {
 890			/* dma failed (most likely in use by another endpoint)
 891			 * fallback to pio
 892			 */
 893			status = 0;
 894
 895			if (ep->is_in)
 896				status = net2272_write_fifo(ep, req);
 897			else {
 898				s = net2272_ep_read(ep, EP_STAT0);
 899				if ((s & (1 << BUFFER_EMPTY)) == 0)
 900					status = net2272_read_fifo(ep, req);
 901			}
 902
 903			if (unlikely(status != 0)) {
 904				if (status > 0)
 905					status = 0;
 906				req = NULL;
 907			}
 908		}
 909	}
 910	if (likely(req))
 911		list_add_tail(&req->queue, &ep->queue);
 912
 913	if (likely(!list_empty(&ep->queue)))
 914		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 915 done:
 916	spin_unlock_irqrestore(&dev->lock, flags);
 917
 918	return 0;
 919}
 920
 921/* dequeue ALL requests */
 922static void
 923net2272_dequeue_all(struct net2272_ep *ep)
 924{
 925	struct net2272_request *req;
 926
 927	/* called with spinlock held */
 928	ep->stopped = 1;
 929
 930	while (!list_empty(&ep->queue)) {
 931		req = list_entry(ep->queue.next,
 932				struct net2272_request,
 933				queue);
 934		net2272_done(ep, req, -ESHUTDOWN);
 935	}
 936}
 937
 938/* dequeue JUST ONE request */
 939static int
 940net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 941{
 942	struct net2272_ep *ep;
 943	struct net2272_request *req;
 944	unsigned long flags;
 945	int stopped;
 946
 947	ep = container_of(_ep, struct net2272_ep, ep);
 948	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 949		return -EINVAL;
 950
 951	spin_lock_irqsave(&ep->dev->lock, flags);
 952	stopped = ep->stopped;
 953	ep->stopped = 1;
 954
 955	/* make sure it's still queued on this endpoint */
 956	list_for_each_entry(req, &ep->queue, queue) {
 957		if (&req->req == _req)
 958			break;
 
 
 959	}
 960	if (&req->req != _req) {
 
 961		spin_unlock_irqrestore(&ep->dev->lock, flags);
 962		return -EINVAL;
 963	}
 964
 965	/* queue head may be partially complete */
 966	if (ep->queue.next == &req->queue) {
 967		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 968		net2272_done(ep, req, -ECONNRESET);
 969	}
 970	req = NULL;
 971	ep->stopped = stopped;
 972
 973	spin_unlock_irqrestore(&ep->dev->lock, flags);
 974	return 0;
 975}
 976
 977/*---------------------------------------------------------------------------*/
 978
 979static int
 980net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 981{
 982	struct net2272_ep *ep;
 983	unsigned long flags;
 984	int ret = 0;
 985
 986	ep = container_of(_ep, struct net2272_ep, ep);
 987	if (!_ep || (!ep->desc && ep->num != 0))
 988		return -EINVAL;
 989	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 990		return -ESHUTDOWN;
 991	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 992		return -EINVAL;
 993
 994	spin_lock_irqsave(&ep->dev->lock, flags);
 995	if (!list_empty(&ep->queue))
 996		ret = -EAGAIN;
 997	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
 998		ret = -EAGAIN;
 999	else {
1000		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1001			value ? "set" : "clear",
1002			wedged ? "wedge" : "halt");
1003		/* set/clear */
1004		if (value) {
1005			if (ep->num == 0)
1006				ep->dev->protocol_stall = 1;
1007			else
1008				set_halt(ep);
1009			if (wedged)
1010				ep->wedged = 1;
1011		} else {
1012			clear_halt(ep);
1013			ep->wedged = 0;
1014		}
1015	}
1016	spin_unlock_irqrestore(&ep->dev->lock, flags);
1017
1018	return ret;
1019}
1020
1021static int
1022net2272_set_halt(struct usb_ep *_ep, int value)
1023{
1024	return net2272_set_halt_and_wedge(_ep, value, 0);
1025}
1026
1027static int
1028net2272_set_wedge(struct usb_ep *_ep)
1029{
1030	if (!_ep || _ep->name == ep0name)
1031		return -EINVAL;
1032	return net2272_set_halt_and_wedge(_ep, 1, 1);
1033}
1034
1035static int
1036net2272_fifo_status(struct usb_ep *_ep)
1037{
1038	struct net2272_ep *ep;
1039	u16 avail;
1040
1041	ep = container_of(_ep, struct net2272_ep, ep);
1042	if (!_ep || (!ep->desc && ep->num != 0))
1043		return -ENODEV;
1044	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1045		return -ESHUTDOWN;
1046
1047	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1048	avail |= net2272_ep_read(ep, EP_AVAIL0);
1049	if (avail > ep->fifo_size)
1050		return -EOVERFLOW;
1051	if (ep->is_in)
1052		avail = ep->fifo_size - avail;
1053	return avail;
1054}
1055
1056static void
1057net2272_fifo_flush(struct usb_ep *_ep)
1058{
1059	struct net2272_ep *ep;
1060
1061	ep = container_of(_ep, struct net2272_ep, ep);
1062	if (!_ep || (!ep->desc && ep->num != 0))
1063		return;
1064	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1065		return;
1066
1067	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1068}
1069
1070static struct usb_ep_ops net2272_ep_ops = {
1071	.enable        = net2272_enable,
1072	.disable       = net2272_disable,
1073
1074	.alloc_request = net2272_alloc_request,
1075	.free_request  = net2272_free_request,
1076
1077	.queue         = net2272_queue,
1078	.dequeue       = net2272_dequeue,
1079
1080	.set_halt      = net2272_set_halt,
1081	.set_wedge     = net2272_set_wedge,
1082	.fifo_status   = net2272_fifo_status,
1083	.fifo_flush    = net2272_fifo_flush,
1084};
1085
1086/*---------------------------------------------------------------------------*/
1087
1088static int
1089net2272_get_frame(struct usb_gadget *_gadget)
1090{
1091	struct net2272 *dev;
1092	unsigned long flags;
1093	u16 ret;
1094
1095	if (!_gadget)
1096		return -ENODEV;
1097	dev = container_of(_gadget, struct net2272, gadget);
1098	spin_lock_irqsave(&dev->lock, flags);
1099
1100	ret = net2272_read(dev, FRAME1) << 8;
1101	ret |= net2272_read(dev, FRAME0);
1102
1103	spin_unlock_irqrestore(&dev->lock, flags);
1104	return ret;
1105}
1106
1107static int
1108net2272_wakeup(struct usb_gadget *_gadget)
1109{
1110	struct net2272 *dev;
1111	u8 tmp;
1112	unsigned long flags;
1113
1114	if (!_gadget)
1115		return 0;
1116	dev = container_of(_gadget, struct net2272, gadget);
1117
1118	spin_lock_irqsave(&dev->lock, flags);
1119	tmp = net2272_read(dev, USBCTL0);
1120	if (tmp & (1 << IO_WAKEUP_ENABLE))
1121		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1122
1123	spin_unlock_irqrestore(&dev->lock, flags);
1124
1125	return 0;
1126}
1127
1128static int
1129net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1130{
1131	if (!_gadget)
1132		return -ENODEV;
1133
1134	_gadget->is_selfpowered = (value != 0);
1135
1136	return 0;
1137}
1138
1139static int
1140net2272_pullup(struct usb_gadget *_gadget, int is_on)
1141{
1142	struct net2272 *dev;
1143	u8 tmp;
1144	unsigned long flags;
1145
1146	if (!_gadget)
1147		return -ENODEV;
1148	dev = container_of(_gadget, struct net2272, gadget);
1149
1150	spin_lock_irqsave(&dev->lock, flags);
1151	tmp = net2272_read(dev, USBCTL0);
1152	dev->softconnect = (is_on != 0);
1153	if (is_on)
1154		tmp |= (1 << USB_DETECT_ENABLE);
1155	else
1156		tmp &= ~(1 << USB_DETECT_ENABLE);
1157	net2272_write(dev, USBCTL0, tmp);
1158	spin_unlock_irqrestore(&dev->lock, flags);
1159
1160	return 0;
1161}
1162
1163static int net2272_start(struct usb_gadget *_gadget,
1164		struct usb_gadget_driver *driver);
1165static int net2272_stop(struct usb_gadget *_gadget);
1166
1167static const struct usb_gadget_ops net2272_ops = {
1168	.get_frame	= net2272_get_frame,
1169	.wakeup		= net2272_wakeup,
1170	.set_selfpowered = net2272_set_selfpowered,
1171	.pullup		= net2272_pullup,
1172	.udc_start	= net2272_start,
1173	.udc_stop	= net2272_stop,
1174};
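/*
 * Mapping note (a brief sketch of how the table above is reached): the
 * gadget core drives these ops, so usb_gadget_connect() and
 * usb_gadget_disconnect() land in net2272_pullup(), usb_gadget_wakeup()
 * in net2272_wakeup(), and usb_gadget_frame_number() in
 * net2272_get_frame().
 */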
1175
1176/*---------------------------------------------------------------------------*/
1177
1178static ssize_t
1179registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1180{
1181	struct net2272 *dev;
1182	char *next;
1183	unsigned size, t;
1184	unsigned long flags;
1185	u8 t1, t2;
1186	int i;
1187	const char *s;
1188
1189	dev = dev_get_drvdata(_dev);
1190	next = buf;
1191	size = PAGE_SIZE;
1192	spin_lock_irqsave(&dev->lock, flags);
1193
1194	if (dev->driver)
1195		s = dev->driver->driver.name;
1196	else
1197		s = "(none)";
1198
1199	/* Main Control Registers */
1200	t = scnprintf(next, size, "%s version %s,"
1201		"chiprev %02x, locctl %02x\n"
1202		"irqenb0 %02x irqenb1 %02x "
1203		"irqstat0 %02x irqstat1 %02x\n",
1204		driver_name, driver_vers, dev->chiprev,
1205		net2272_read(dev, LOCCTL),
1206		net2272_read(dev, IRQENB0),
1207		net2272_read(dev, IRQENB1),
1208		net2272_read(dev, IRQSTAT0),
1209		net2272_read(dev, IRQSTAT1));
1210	size -= t;
1211	next += t;
1212
1213	/* DMA */
1214	t1 = net2272_read(dev, DMAREQ);
1215	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1216		t1, ep_name[(t1 & 0x01) + 1],
1217		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1218		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1219		t1 & (1 << DMA_REQUEST) ? "req " : "",
1220		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1221	size -= t;
1222	next += t;
1223
1224	/* USB Control Registers */
1225	t1 = net2272_read(dev, USBCTL1);
1226	if (t1 & (1 << VBUS_PIN)) {
1227		if (t1 & (1 << USB_HIGH_SPEED))
1228			s = "high speed";
1229		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1230			s = "powered";
1231		else
1232			s = "full speed";
1233	} else
1234		s = "not attached";
1235	t = scnprintf(next, size,
1236		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1237		net2272_read(dev, USBCTL0), t1,
1238		net2272_read(dev, OURADDR), s);
1239	size -= t;
1240	next += t;
1241
1242	/* Endpoint Registers */
1243	for (i = 0; i < 4; ++i) {
1244		struct net2272_ep *ep;
1245
1246		ep = &dev->ep[i];
1247		if (i && !ep->desc)
1248			continue;
1249
1250		t1 = net2272_ep_read(ep, EP_CFG);
1251		t2 = net2272_ep_read(ep, EP_RSPSET);
1252		t = scnprintf(next, size,
1253			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1254			"irqenb %02x\n",
1255			ep->ep.name, t1, t2,
1256			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1257			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1258			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1259			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1260			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1261			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1262			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1263			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1264			net2272_ep_read(ep, EP_IRQENB));
1265		size -= t;
1266		next += t;
1267
1268		t = scnprintf(next, size,
1269			"\tstat0 %02x stat1 %02x avail %04x "
1270			"(ep%d%s-%s)%s\n",
1271			net2272_ep_read(ep, EP_STAT0),
1272			net2272_ep_read(ep, EP_STAT1),
1273			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1274			t1 & 0x0f,
1275			ep->is_in ? "in" : "out",
1276			type_string(t1 >> 5),
1277			ep->stopped ? "*" : "");
1278		size -= t;
1279		next += t;
1280
1281		t = scnprintf(next, size,
1282			"\tep_transfer %06x\n",
1283			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1284			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1285			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1286		size -= t;
1287		next += t;
1288
1289		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1290		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1291		t = scnprintf(next, size,
1292			"\tbuf-a %s buf-b %s\n",
1293			buf_state_string(t1),
1294			buf_state_string(t2));
1295		size -= t;
1296		next += t;
1297	}
1298
1299	spin_unlock_irqrestore(&dev->lock, flags);
1300
1301	return PAGE_SIZE - size;
1302}
1303static DEVICE_ATTR_RO(registers);
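/*
 * The attribute above becomes a read-only "registers" file in the
 * controller's sysfs device directory once net2272_probe_fin() calls
 * device_create_file(); reading it returns the register dump built by
 * registers_show().
 */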
1304
1305/*---------------------------------------------------------------------------*/
1306
1307static void
1308net2272_set_fifo_mode(struct net2272 *dev, int mode)
1309{
1310	u8 tmp;
1311
1312	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1313	tmp |= (mode << 6);
1314	net2272_write(dev, LOCCTL, tmp);
1315
1316	INIT_LIST_HEAD(&dev->gadget.ep_list);
1317
1318	/* always ep-a, ep-c ... maybe not ep-b */
1319	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1320
1321	switch (mode) {
1322	case 0:
1323		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1324		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1325		break;
1326	case 1:
1327		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1328		dev->ep[1].fifo_size = 1024;
1329		dev->ep[2].fifo_size = 512;
1330		break;
1331	case 2:
1332		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1333		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1334		break;
1335	case 3:
1336		dev->ep[1].fifo_size = 1024;
1337		break;
1338	}
1339
1340	/* ep-c is always 2 512 byte buffers */
1341	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1342	dev->ep[3].fifo_size = 512;
1343}
1344
1345/*---------------------------------------------------------------------------*/
1346
1347static void
1348net2272_usb_reset(struct net2272 *dev)
1349{
1350	dev->gadget.speed = USB_SPEED_UNKNOWN;
1351
1352	net2272_cancel_dma(dev);
1353
1354	net2272_write(dev, IRQENB0, 0);
1355	net2272_write(dev, IRQENB1, 0);
1356
1357	/* clear irq state */
1358	net2272_write(dev, IRQSTAT0, 0xff);
1359	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1360
1361	net2272_write(dev, DMAREQ,
1362		(0 << DMA_BUFFER_VALID) |
1363		(0 << DMA_REQUEST_ENABLE) |
1364		(1 << DMA_CONTROL_DACK) |
1365		(dev->dma_eot_polarity << EOT_POLARITY) |
1366		(dev->dma_dack_polarity << DACK_POLARITY) |
1367		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1368		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1369
1370	net2272_cancel_dma(dev);
1371	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1372
 1373	/* Set the NET2272 ep fifo data width to 16-bit mode.  Note that the higher level
 1374	 * gadget drivers are expected to convert data to little endian; for correct byte
 1375	 * swapping on your local bus/cpu, enable BYTE_SWAP in LOCCTL here if needed
1376	 */
1377	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1378	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1379}
1380
1381static void
1382net2272_usb_reinit(struct net2272 *dev)
1383{
1384	int i;
1385
1386	/* basic endpoint init */
1387	for (i = 0; i < 4; ++i) {
1388		struct net2272_ep *ep = &dev->ep[i];
1389
1390		ep->ep.name = ep_name[i];
1391		ep->dev = dev;
1392		ep->num = i;
1393		ep->not_empty = 0;
1394
1395		if (use_dma && ep->num == dma_ep)
1396			ep->dma = 1;
1397
1398		if (i > 0 && i <= 3)
1399			ep->fifo_size = 512;
1400		else
1401			ep->fifo_size = 64;
1402		net2272_ep_reset(ep);
1403
1404		if (i == 0) {
1405			ep->ep.caps.type_control = true;
1406		} else {
1407			ep->ep.caps.type_iso = true;
1408			ep->ep.caps.type_bulk = true;
1409			ep->ep.caps.type_int = true;
1410		}
1411
1412		ep->ep.caps.dir_in = true;
1413		ep->ep.caps.dir_out = true;
1414	}
1415	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1416
1417	dev->gadget.ep0 = &dev->ep[0].ep;
1418	dev->ep[0].stopped = 0;
1419	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1420}
1421
1422static void
1423net2272_ep0_start(struct net2272 *dev)
1424{
1425	struct net2272_ep *ep0 = &dev->ep[0];
1426
1427	net2272_ep_write(ep0, EP_RSPSET,
1428		(1 << NAK_OUT_PACKETS_MODE) |
1429		(1 << ALT_NAK_OUT_PACKETS));
1430	net2272_ep_write(ep0, EP_RSPCLR,
1431		(1 << HIDE_STATUS_PHASE) |
1432		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1433	net2272_write(dev, USBCTL0,
1434		(dev->softconnect << USB_DETECT_ENABLE) |
1435		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1436		(1 << IO_WAKEUP_ENABLE));
1437	net2272_write(dev, IRQENB0,
1438		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1439		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1440		(1 << DMA_DONE_INTERRUPT_ENABLE));
1441	net2272_write(dev, IRQENB1,
1442		(1 << VBUS_INTERRUPT_ENABLE) |
1443		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1444		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1445}
1446
1447/* when a driver is successfully registered, it will receive
1448 * control requests including set_configuration(), which enables
1449 * non-control requests.  then usb traffic follows until a
1450 * disconnect is reported.  then a host may connect again, or
1451 * the driver might get unbound.
1452 */
1453static int net2272_start(struct usb_gadget *_gadget,
1454		struct usb_gadget_driver *driver)
1455{
1456	struct net2272 *dev;
1457	unsigned i;
1458
1459	if (!driver || !driver->setup ||
1460	    driver->max_speed != USB_SPEED_HIGH)
1461		return -EINVAL;
1462
1463	dev = container_of(_gadget, struct net2272, gadget);
1464
1465	for (i = 0; i < 4; ++i)
1466		dev->ep[i].irqs = 0;
1467	/* hook up the driver ... */
1468	dev->softconnect = 1;
1469	driver->driver.bus = NULL;
1470	dev->driver = driver;
1471
1472	/* ... then enable host detection and ep0; and we're ready
1473	 * for set_configuration as well as eventual disconnect.
1474	 */
1475	net2272_ep0_start(dev);
1476
1477	return 0;
1478}
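/*
 * In practice this is reached when a gadget/function driver (a legacy
 * g_* module or a configfs-composed gadget, for example) binds to this
 * UDC: the gadget core then calls .udc_start above, and .udc_stop below
 * when that driver is unbound.
 */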
1479
1480static void
1481stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1482{
1483	int i;
1484
1485	/* don't disconnect if it's not connected */
1486	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1487		driver = NULL;
1488
1489	/* stop hardware; prevent new request submissions;
1490	 * and kill any outstanding requests.
1491	 */
1492	net2272_usb_reset(dev);
1493	for (i = 0; i < 4; ++i)
1494		net2272_dequeue_all(&dev->ep[i]);
1495
1496	/* report disconnect; the driver is already quiesced */
1497	if (driver) {
1498		spin_unlock(&dev->lock);
1499		driver->disconnect(&dev->gadget);
1500		spin_lock(&dev->lock);
1501	}
1502
1503	net2272_usb_reinit(dev);
1504}
1505
1506static int net2272_stop(struct usb_gadget *_gadget)
1507{
1508	struct net2272 *dev;
1509	unsigned long flags;
1510
1511	dev = container_of(_gadget, struct net2272, gadget);
1512
1513	spin_lock_irqsave(&dev->lock, flags);
1514	stop_activity(dev, NULL);
1515	spin_unlock_irqrestore(&dev->lock, flags);
1516
1517	dev->driver = NULL;
1518
1519	return 0;
1520}
1521
1522/*---------------------------------------------------------------------------*/
1523/* handle ep-a/ep-b dma completions */
1524static void
1525net2272_handle_dma(struct net2272_ep *ep)
1526{
1527	struct net2272_request *req;
1528	unsigned len;
1529	int status;
1530
1531	if (!list_empty(&ep->queue))
1532		req = list_entry(ep->queue.next,
1533				struct net2272_request, queue);
1534	else
1535		req = NULL;
1536
1537	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1538
1539	/* Ensure DREQ is de-asserted */
1540	net2272_write(ep->dev, DMAREQ,
1541		(0 << DMA_BUFFER_VALID)
1542	      | (0 << DMA_REQUEST_ENABLE)
1543	      | (1 << DMA_CONTROL_DACK)
1544	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1545	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1546	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1547	      | (ep->dma << DMA_ENDPOINT_SELECT));
1548
1549	ep->dev->dma_busy = 0;
1550
1551	net2272_ep_write(ep, EP_IRQENB,
1552		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1553		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1554		| net2272_ep_read(ep, EP_IRQENB));
1555
1556	/* device-to-host transfer completed */
1557	if (ep->is_in) {
1558		/* validate a short packet or zlp if necessary */
1559		if ((req->req.length % ep->ep.maxpacket != 0) ||
1560				req->req.zero)
1561			set_fifo_bytecount(ep, 0);
1562
1563		net2272_done(ep, req, 0);
1564		if (!list_empty(&ep->queue)) {
1565			req = list_entry(ep->queue.next,
1566					struct net2272_request, queue);
1567			status = net2272_kick_dma(ep, req);
1568			if (status < 0)
1569				net2272_pio_advance(ep);
1570		}
1571
1572	/* host-to-device transfer completed */
1573	} else {
1574		/* terminated with a short packet? */
1575		if (net2272_read(ep->dev, IRQSTAT0) &
1576				(1 << DMA_DONE_INTERRUPT)) {
1577			/* abort system dma */
1578			net2272_cancel_dma(ep->dev);
1579		}
1580
1581		/* EP_TRANSFER will contain the number of bytes
1582		 * actually received.
1583		 * NOTE: There is no overflow detection on EP_TRANSFER:
1584		 * We can't deal with transfers larger than 2^24 bytes!
1585		 */
1586		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1587			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1588			| (net2272_ep_read(ep, EP_TRANSFER0));
1589
1590		if (ep->not_empty)
1591			len += 4;
1592
1593		req->req.actual += len;
1594
1595		/* get any remaining data */
1596		net2272_pio_advance(ep);
1597	}
1598}
1599
1600/*---------------------------------------------------------------------------*/
1601
1602static void
1603net2272_handle_ep(struct net2272_ep *ep)
1604{
1605	struct net2272_request *req;
1606	u8 stat0, stat1;
1607
1608	if (!list_empty(&ep->queue))
1609		req = list_entry(ep->queue.next,
1610			struct net2272_request, queue);
1611	else
1612		req = NULL;
1613
1614	/* ack all, and handle what we care about */
1615	stat0 = net2272_ep_read(ep, EP_STAT0);
1616	stat1 = net2272_ep_read(ep, EP_STAT1);
1617	ep->irqs++;
1618
1619	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1620		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1621
1622	net2272_ep_write(ep, EP_STAT0, stat0 &
1623		~((1 << NAK_OUT_PACKETS)
1624		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1625	net2272_ep_write(ep, EP_STAT1, stat1);
1626
1627	/* data packet(s) received (in the fifo, OUT)
1628	 * direction must be validated, otherwise control read status phase
1629	 * could be interpreted as a valid packet
1630	 */
1631	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1632		net2272_pio_advance(ep);
1633	/* data packet(s) transmitted (IN) */
1634	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1635		net2272_pio_advance(ep);
1636}
1637
1638static struct net2272_ep *
1639net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1640{
1641	struct net2272_ep *ep;
1642
1643	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1644		return &dev->ep[0];
1645
1646	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1647		u8 bEndpointAddress;
1648
1649		if (!ep->desc)
1650			continue;
1651		bEndpointAddress = ep->desc->bEndpointAddress;
1652		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1653			continue;
1654		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1655			return ep;
1656	}
1657	return NULL;
1658}
1659
1660/*
1661 * USB Test Packet:
1662 * JKJKJKJK * 9
1663 * JJKKJJKK * 8
1664 * JJJJKKKK * 8
1665 * JJJJJJJKKKKKKK * 8
1666 * JJJJJJJK * 8
1667 * {JKKKKKKK * 10}, JK
1668 */
1669static const u8 net2272_test_packet[] = {
1670	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1671	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1672	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1673	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1674	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1675	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1676};
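/*
 * The bytes above are the standard USB 2.0 test packet payload; the J/K
 * sequence shown in the comment is the resulting bus state after NRZI
 * encoding and bit stuffing, which TEST_PACKET mode transmits repeatedly.
 */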
1677
1678static void
1679net2272_set_test_mode(struct net2272 *dev, int mode)
1680{
1681	int i;
1682
1683	/* Disable all net2272 interrupts:
1684	 * Nothing but a power cycle should stop the test.
1685	 */
1686	net2272_write(dev, IRQENB0, 0x00);
1687	net2272_write(dev, IRQENB1, 0x00);
1688
 1689	/* Force transceiver to high-speed */
1690	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1691
1692	net2272_write(dev, PAGESEL, 0);
1693	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1694	net2272_write(dev, EP_RSPCLR,
1695			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1696			| (1 << HIDE_STATUS_PHASE));
1697	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1698	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1699
1700	/* wait for status phase to complete */
1701	while (!(net2272_read(dev, EP_STAT0) &
1702				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1703		;
1704
1705	/* Enable test mode */
1706	net2272_write(dev, USBTEST, mode);
1707
1708	/* load test packet */
1709	if (mode == TEST_PACKET) {
1710		/* switch to 8 bit mode */
1711		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1712				~(1 << DATA_WIDTH));
1713
1714		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1715			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1716
1717		/* Validate test packet */
1718		net2272_write(dev, EP_TRANSFER0, 0);
1719	}
1720}
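/*
 * The mode value comes from the high byte of wIndex in a
 * SET_FEATURE(TEST_MODE) request (see the "u.r.wIndex >> 8" in
 * net2272_handle_stat0_irqs() below), i.e. the standard TEST_J, TEST_K,
 * TEST_SE0_NAK, TEST_PACKET and TEST_FORCE_EN selectors.
 */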
1721
1722static void
1723net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1724{
1725	struct net2272_ep *ep;
1726	u8 num, scratch;
1727
1728	/* starting a control request? */
1729	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1730		union {
1731			u8 raw[8];
1732			struct usb_ctrlrequest	r;
1733		} u;
1734		int tmp = 0;
1735		struct net2272_request *req;
1736
1737		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1738			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1739				dev->gadget.speed = USB_SPEED_HIGH;
1740			else
1741				dev->gadget.speed = USB_SPEED_FULL;
1742			dev_dbg(dev->dev, "%s\n",
1743				usb_speed_string(dev->gadget.speed));
1744		}
1745
1746		ep = &dev->ep[0];
1747		ep->irqs++;
1748
1749		/* make sure any leftover interrupt state is cleared */
1750		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1751		while (!list_empty(&ep->queue)) {
1752			req = list_entry(ep->queue.next,
1753				struct net2272_request, queue);
1754			net2272_done(ep, req,
1755				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1756		}
1757		ep->stopped = 0;
1758		dev->protocol_stall = 0;
1759		net2272_ep_write(ep, EP_STAT0,
1760			    (1 << DATA_IN_TOKEN_INTERRUPT)
1761			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1762			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1763			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1764			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1765		net2272_ep_write(ep, EP_STAT1,
1766			    (1 << TIMEOUT)
1767			  | (1 << USB_OUT_ACK_SENT)
1768			  | (1 << USB_OUT_NAK_SENT)
1769			  | (1 << USB_IN_ACK_RCVD)
1770			  | (1 << USB_IN_NAK_SENT)
1771			  | (1 << USB_STALL_SENT)
1772			  | (1 << LOCAL_OUT_ZLP));
1773
1774		/*
1775		 * Ensure Control Read pre-validation setting is beyond maximum size
1776		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1777		 *    an EP0 transfer following the Control Write is a Control Read,
1778		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1779		 *    pre-validation count.
1780		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
 1781		 *    the pre-validation count cannot cause an unexpected validation
1782		 */
1783		net2272_write(dev, PAGESEL, 0);
1784		net2272_write(dev, EP_TRANSFER2, 0xff);
1785		net2272_write(dev, EP_TRANSFER1, 0xff);
1786		net2272_write(dev, EP_TRANSFER0, 0xff);
1787
1788		u.raw[0] = net2272_read(dev, SETUP0);
1789		u.raw[1] = net2272_read(dev, SETUP1);
1790		u.raw[2] = net2272_read(dev, SETUP2);
1791		u.raw[3] = net2272_read(dev, SETUP3);
1792		u.raw[4] = net2272_read(dev, SETUP4);
1793		u.raw[5] = net2272_read(dev, SETUP5);
1794		u.raw[6] = net2272_read(dev, SETUP6);
1795		u.raw[7] = net2272_read(dev, SETUP7);
1796		/*
1797		 * If you have a big endian cpu make sure le16_to_cpus
1798		 * performs the proper byte swapping here...
1799		 */
1800		le16_to_cpus(&u.r.wValue);
1801		le16_to_cpus(&u.r.wIndex);
1802		le16_to_cpus(&u.r.wLength);
1803
1804		/* ack the irq */
1805		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1806		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1807
1808		/* watch control traffic at the token level, and force
1809		 * synchronization before letting the status phase happen.
1810		 */
1811		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1812		if (ep->is_in) {
1813			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1814				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1815				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1816			stop_out_naking(ep);
1817		} else
1818			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1819				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1820				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1821		net2272_ep_write(ep, EP_IRQENB, scratch);
1822
1823		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1824			goto delegate;
1825		switch (u.r.bRequest) {
1826		case USB_REQ_GET_STATUS: {
1827			struct net2272_ep *e;
1828			u16 status = 0;
1829
1830			switch (u.r.bRequestType & USB_RECIP_MASK) {
1831			case USB_RECIP_ENDPOINT:
1832				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1833				if (!e || u.r.wLength > 2)
1834					goto do_stall;
1835				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1836					status = cpu_to_le16(1);
1837				else
1838					status = cpu_to_le16(0);
1839
1840				/* don't bother with a request object! */
1841				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1842				writew(status, net2272_reg_addr(dev, EP_DATA));
1843				set_fifo_bytecount(&dev->ep[0], 0);
1844				allow_status(ep);
1845				dev_vdbg(dev->dev, "%s stat %02x\n",
1846					ep->ep.name, status);
1847				goto next_endpoints;
1848			case USB_RECIP_DEVICE:
1849				if (u.r.wLength > 2)
1850					goto do_stall;
1851				if (dev->gadget.is_selfpowered)
1852					status = (1 << USB_DEVICE_SELF_POWERED);
1853
1854				/* don't bother with a request object! */
1855				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1856				writew(status, net2272_reg_addr(dev, EP_DATA));
1857				set_fifo_bytecount(&dev->ep[0], 0);
1858				allow_status(ep);
1859				dev_vdbg(dev->dev, "device stat %02x\n", status);
1860				goto next_endpoints;
1861			case USB_RECIP_INTERFACE:
1862				if (u.r.wLength > 2)
1863					goto do_stall;
1864
1865				/* don't bother with a request object! */
1866				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1867				writew(status, net2272_reg_addr(dev, EP_DATA));
1868				set_fifo_bytecount(&dev->ep[0], 0);
1869				allow_status(ep);
1870				dev_vdbg(dev->dev, "interface status %02x\n", status);
1871				goto next_endpoints;
1872			}
1873
1874			break;
1875		}
1876		case USB_REQ_CLEAR_FEATURE: {
1877			struct net2272_ep *e;
1878
1879			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1880				goto delegate;
1881			if (u.r.wValue != USB_ENDPOINT_HALT ||
1882			    u.r.wLength != 0)
1883				goto do_stall;
1884			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1885			if (!e)
1886				goto do_stall;
1887			if (e->wedged) {
1888				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1889					ep->ep.name);
1890			} else {
1891				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1892				clear_halt(e);
1893			}
1894			allow_status(ep);
1895			goto next_endpoints;
1896		}
1897		case USB_REQ_SET_FEATURE: {
1898			struct net2272_ep *e;
1899
1900			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1901				if (u.r.wIndex != NORMAL_OPERATION)
1902					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1903				allow_status(ep);
1904				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1905				goto next_endpoints;
1906			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1907				goto delegate;
1908			if (u.r.wValue != USB_ENDPOINT_HALT ||
1909			    u.r.wLength != 0)
1910				goto do_stall;
1911			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1912			if (!e)
1913				goto do_stall;
1914			set_halt(e);
1915			allow_status(ep);
1916			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1917			goto next_endpoints;
1918		}
1919		case USB_REQ_SET_ADDRESS: {
1920			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1921			allow_status(ep);
1922			break;
1923		}
1924		default:
1925 delegate:
1926			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1927				"ep_cfg %08x\n",
1928				u.r.bRequestType, u.r.bRequest,
1929				u.r.wValue, u.r.wIndex,
1930				net2272_ep_read(ep, EP_CFG));
1931			spin_unlock(&dev->lock);
1932			tmp = dev->driver->setup(&dev->gadget, &u.r);
1933			spin_lock(&dev->lock);
1934		}
1935
1936		/* stall ep0 on error */
1937		if (tmp < 0) {
1938 do_stall:
1939			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1940				u.r.bRequestType, u.r.bRequest, tmp);
1941			dev->protocol_stall = 1;
1942		}
1943	/* endpoint dma irq? */
1944	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1945		net2272_cancel_dma(dev);
1946		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1947		stat &= ~(1 << DMA_DONE_INTERRUPT);
1948		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1949			? 2 : 1;
1950
1951		ep = &dev->ep[num];
1952		net2272_handle_dma(ep);
1953	}
1954
1955 next_endpoints:
1956	/* endpoint data irq? */
1957	scratch = stat & 0x0f;
1958	stat &= ~0x0f;
1959	for (num = 0; scratch; num++) {
1960		u8 t;
1961
1962		/* does this endpoint's FIFO and queue need tending? */
1963		t = 1 << num;
1964		if ((scratch & t) == 0)
1965			continue;
1966		scratch ^= t;
1967
1968		ep = &dev->ep[num];
1969		net2272_handle_ep(ep);
1970	}
1971
1972	/* some interrupts we can just ignore */
1973	stat &= ~(1 << SOF_INTERRUPT);
1974
1975	if (stat)
1976		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1977}
1978
1979static void
1980net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1981{
1982	u8 tmp, mask;
1983
1984	/* after disconnect there's nothing else to do! */
1985	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1986	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1987
1988	if (stat & tmp) {
1989		bool	reset = false;
1990		bool	disconnect = false;
1991
1992		/*
1993		 * Ignore disconnects and resets if the speed hasn't been set.
1994		 * VBUS can bounce and there's always an initial reset.
1995		 */
1996		net2272_write(dev, IRQSTAT1, tmp);
1997		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1998			if ((stat & (1 << VBUS_INTERRUPT)) &&
1999					(net2272_read(dev, USBCTL1) &
2000						(1 << VBUS_PIN)) == 0) {
2001				disconnect = true;
2002				dev_dbg(dev->dev, "disconnect %s\n",
2003					dev->driver->driver.name);
2004			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2005					(net2272_read(dev, USBCTL1) & mask)
2006						== 0) {
2007				reset = true;
2008				dev_dbg(dev->dev, "reset %s\n",
2009					dev->driver->driver.name);
2010			}
2011
2012			if (disconnect || reset) {
2013				stop_activity(dev, dev->driver);
2014				net2272_ep0_start(dev);
2015				spin_unlock(&dev->lock);
2016				if (reset)
2017					usb_gadget_udc_reset
2018						(&dev->gadget, dev->driver);
2019				else
2020					(dev->driver->disconnect)
2021						(&dev->gadget);
2022				spin_lock(&dev->lock);
2023				return;
2024			}
2025		}
2026		stat &= ~tmp;
2027
2028		if (!stat)
2029			return;
2030	}
2031
2032	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2033	if (stat & tmp) {
2034		net2272_write(dev, IRQSTAT1, tmp);
2035		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2036			if (dev->driver->suspend)
2037				dev->driver->suspend(&dev->gadget);
2038			if (!enable_suspend) {
2039				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2040				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2041			}
2042		} else {
2043			if (dev->driver->resume)
2044				dev->driver->resume(&dev->gadget);
2045		}
2046		stat &= ~tmp;
2047	}
2048
2049	/* clear any other status/irqs */
2050	if (stat)
2051		net2272_write(dev, IRQSTAT1, stat);
2052
2053	/* some status we can just ignore */
2054	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2055			| (1 << SUSPEND_REQUEST_INTERRUPT)
2056			| (1 << RESUME_INTERRUPT));
2057	if (!stat)
2058		return;
2059	else
2060		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2061}
2062
2063static irqreturn_t net2272_irq(int irq, void *_dev)
2064{
2065	struct net2272 *dev = _dev;
2066#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2067	u32 intcsr;
2068#endif
2069#if defined(PLX_PCI_RDK)
2070	u8 dmareq;
2071#endif
2072	spin_lock(&dev->lock);
2073#if defined(PLX_PCI_RDK)
2074	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2075
2076	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2077		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2078				dev->rdk1.plx9054_base_addr + INTCSR);
2079		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2080		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2081		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2082		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2083			dev->rdk1.plx9054_base_addr + INTCSR);
2084	}
2085	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2086		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2087				dev->rdk1.plx9054_base_addr + DMACSR0);
2088
2089		dmareq = net2272_read(dev, DMAREQ);
2090		if (dmareq & 0x01)
2091			net2272_handle_dma(&dev->ep[2]);
2092		else
2093			net2272_handle_dma(&dev->ep[1]);
2094	}
2095#endif
2096#if defined(PLX_PCI_RDK2)
 2097	/* see if this PCI interrupt is for us by checking irqstat */
2098	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
 2099	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2100		spin_unlock(&dev->lock);
2101		return IRQ_NONE;
2102	}
2103	/* check dma interrupts */
2104#endif
 2105	/* Platform/device interrupt handler */
2106#if !defined(PLX_PCI_RDK)
2107	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2108	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2109#endif
2110	spin_unlock(&dev->lock);
2111
2112	return IRQ_HANDLED;
2113}
2114
2115static int net2272_present(struct net2272 *dev)
2116{
2117	/*
2118	 * Quick test to see if CPU can communicate properly with the NET2272.
2119	 * Verifies connection using writes and reads to write/read and
2120	 * read-only registers.
2121	 *
 2122	 * This routine is strongly recommended, especially during early bring-up
 2123	 * of new hardware; however, for designs that do not apply Power On System
 2124	 * Tests (POST) it may be discarded (or perhaps minimized).
2125	 */
2126	unsigned int ii;
2127	u8 val, refval;
2128
2129	/* Verify NET2272 write/read SCRATCH register can write and read */
2130	refval = net2272_read(dev, SCRATCH);
2131	for (ii = 0; ii < 0x100; ii += 7) {
2132		net2272_write(dev, SCRATCH, ii);
2133		val = net2272_read(dev, SCRATCH);
2134		if (val != ii) {
2135			dev_dbg(dev->dev,
2136				"%s: write/read SCRATCH register test failed: "
2137				"wrote:0x%2.2x, read:0x%2.2x\n",
2138				__func__, ii, val);
2139			return -EINVAL;
2140		}
2141	}
2142	/* To be nice, we write the original SCRATCH value back: */
2143	net2272_write(dev, SCRATCH, refval);
2144
2145	/* Verify NET2272 CHIPREV register is read-only: */
2146	refval = net2272_read(dev, CHIPREV_2272);
2147	for (ii = 0; ii < 0x100; ii += 7) {
2148		net2272_write(dev, CHIPREV_2272, ii);
2149		val = net2272_read(dev, CHIPREV_2272);
2150		if (val != refval) {
2151			dev_dbg(dev->dev,
2152				"%s: write/read CHIPREV register test failed: "
2153				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2154				__func__, ii, val, refval);
2155			return -EINVAL;
2156		}
2157	}
2158
2159	/*
2160	 * Verify NET2272's "NET2270 legacy revision" register
2161	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2162	 *    register should read the same value, regardless of the NET2272
2163	 *    silicon revision.  The legacy register applies to NET2270
2164	 *    firmware being applied to the NET2272.
2165	 */
2166	val = net2272_read(dev, CHIPREV_LEGACY);
2167	if (val != NET2270_LEGACY_REV) {
2168		/*
2169		 * Unexpected legacy revision value
2170		 * - Perhaps the chip is a NET2270?
2171		 */
2172		dev_dbg(dev->dev,
2173			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2174			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2175			__func__, NET2270_LEGACY_REV, val);
2176		return -EINVAL;
2177	}
2178
2179	/*
2180	 * Verify NET2272 silicon revision
2181	 *  - This revision register is appropriate for the silicon version
2182	 *    of the NET2272
2183	 */
2184	val = net2272_read(dev, CHIPREV_2272);
2185	switch (val) {
2186	case CHIPREV_NET2272_R1:
2187		/*
2188		 * NET2272 Rev 1 has DMA related errata:
2189		 *  - Newer silicon (Rev 1A or better) required
2190		 */
2191		dev_dbg(dev->dev,
2192			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2193			__func__);
2194		break;
2195	case CHIPREV_NET2272_R1A:
2196		break;
2197	default:
2198		/* NET2272 silicon version *may* not work with this firmware */
2199		dev_dbg(dev->dev,
2200			"%s: unexpected silicon revision register value: "
2201			" CHIPREV_2272: 0x%2.2x\n",
2202			__func__, val);
2203		/*
2204		 * Return Success, even though the chip rev is not an expected value
2205		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2206		 *  - Often, new silicon is perfectly compatible
2207		 */
2208	}
2209
2210	/* Success: NET2272 checks out OK */
2211	return 0;
2212}
2213
2214static void
2215net2272_gadget_release(struct device *_dev)
2216{
2217	struct net2272 *dev = dev_get_drvdata(_dev);
2218	kfree(dev);
2219}
2220
2221/*---------------------------------------------------------------------------*/
2222
2223static void
2224net2272_remove(struct net2272 *dev)
2225{
2226	usb_del_gadget_udc(&dev->gadget);
2227	free_irq(dev->irq, dev);
2228	iounmap(dev->base_addr);
2229	device_remove_file(dev->dev, &dev_attr_registers);
2230
2231	dev_info(dev->dev, "unbind\n");
2232}
2233
2234static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2235{
2236	struct net2272 *ret;
2237
2238	if (!irq) {
2239		dev_dbg(dev, "No IRQ!\n");
2240		return ERR_PTR(-ENODEV);
2241	}
2242
2243	/* alloc, and start init */
2244	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2245	if (!ret)
2246		return ERR_PTR(-ENOMEM);
2247
2248	spin_lock_init(&ret->lock);
2249	ret->irq = irq;
2250	ret->dev = dev;
2251	ret->gadget.ops = &net2272_ops;
2252	ret->gadget.max_speed = USB_SPEED_HIGH;
2253
2254	/* the "gadget" abstracts/virtualizes the controller */
2255	ret->gadget.name = driver_name;
2256
2257	return ret;
2258}
2259
2260static int
2261net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2262{
2263	int ret;
2264
 2265	/* See if the NET2272 is there... */
2266	if (net2272_present(dev)) {
2267		dev_warn(dev->dev, "2272 not found!\n");
2268		ret = -ENODEV;
2269		goto err;
2270	}
2271
2272	net2272_usb_reset(dev);
2273	net2272_usb_reinit(dev);
2274
2275	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2276	if (ret) {
2277		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2278		goto err;
2279	}
2280
2281	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2282
2283	/* done */
2284	dev_info(dev->dev, "%s\n", driver_desc);
2285	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2286		dev->irq, dev->base_addr, dev->chiprev,
2287		dma_mode_string());
2288	dev_info(dev->dev, "version: %s\n", driver_vers);
2289
2290	ret = device_create_file(dev->dev, &dev_attr_registers);
2291	if (ret)
2292		goto err_irq;
2293
2294	ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2295			net2272_gadget_release);
2296	if (ret)
2297		goto err_add_udc;
2298
2299	return 0;
2300
2301err_add_udc:
2302	device_remove_file(dev->dev, &dev_attr_registers);
2303 err_irq:
2304	free_irq(dev->irq, dev);
2305 err:
2306	return ret;
2307}
2308
2309#ifdef CONFIG_PCI
2310
2311/*
2312 * wrap this driver around the specified device, but
2313 * don't respond over USB until a gadget driver binds to us
2314 */
2315
2316static int
2317net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2318{
2319	unsigned long resource, len, tmp;
2320	void __iomem *mem_mapped_addr[4];
2321	int ret, i;
2322
2323	/*
2324	 * BAR 0 holds PLX 9054 config registers
2325	 * BAR 1 is i/o memory; unused here
2326	 * BAR 2 holds EPLD config registers
2327	 * BAR 3 holds NET2272 registers
2328	 */
2329
2330	/* Find and map all address spaces */
2331	for (i = 0; i < 4; ++i) {
2332		if (i == 1)
2333			continue;	/* BAR1 unused */
2334
2335		resource = pci_resource_start(pdev, i);
2336		len = pci_resource_len(pdev, i);
2337
2338		if (!request_mem_region(resource, len, driver_name)) {
2339			dev_dbg(dev->dev, "controller already in use\n");
2340			ret = -EBUSY;
2341			goto err;
2342		}
2343
2344		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2345		if (mem_mapped_addr[i] == NULL) {
2346			release_mem_region(resource, len);
2347			dev_dbg(dev->dev, "can't map memory\n");
2348			ret = -EFAULT;
2349			goto err;
2350		}
2351	}
2352
2353	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2354	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2355	dev->base_addr = mem_mapped_addr[3];
2356
2357	/* Set PLX 9054 bus width (16 bits) */
2358	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2359	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2360			dev->rdk1.plx9054_base_addr + LBRD1);
2361
2362	/* Enable PLX 9054 Interrupts */
2363	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2364			(1 << PCI_INTERRUPT_ENABLE) |
2365			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2366			dev->rdk1.plx9054_base_addr + INTCSR);
2367
2368	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2369			dev->rdk1.plx9054_base_addr + DMACSR0);
2370
2371	/* reset */
2372	writeb((1 << EPLD_DMA_ENABLE) |
2373		(1 << DMA_CTL_DACK) |
2374		(1 << DMA_TIMEOUT_ENABLE) |
2375		(1 << USER) |
2376		(0 << MPX_MODE) |
2377		(1 << BUSWIDTH) |
2378		(1 << NET2272_RESET),
2379		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2380
2381	mb();
2382	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2383		~(1 << NET2272_RESET),
2384		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2385	udelay(200);
2386
2387	return 0;
2388
2389 err:
2390	while (--i >= 0) {
2391		iounmap(mem_mapped_addr[i]);
2392		release_mem_region(pci_resource_start(pdev, i),
2393			pci_resource_len(pdev, i));
2394	}
2395
2396	return ret;
2397}
2398
2399static int
2400net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2401{
2402	unsigned long resource, len;
2403	void __iomem *mem_mapped_addr[2];
2404	int ret, i;
2405
2406	/*
 2407	 * BAR 0 holds FPGA config registers
2408	 * BAR 1 holds NET2272 registers
2409	 */
2410
2411	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2412	for (i = 0; i < 2; ++i) {
2413		resource = pci_resource_start(pdev, i);
2414		len = pci_resource_len(pdev, i);
2415
2416		if (!request_mem_region(resource, len, driver_name)) {
2417			dev_dbg(dev->dev, "controller already in use\n");
2418			ret = -EBUSY;
2419			goto err;
2420		}
2421
2422		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2423		if (mem_mapped_addr[i] == NULL) {
2424			release_mem_region(resource, len);
2425			dev_dbg(dev->dev, "can't map memory\n");
2426			ret = -EFAULT;
2427			goto err;
2428		}
2429	}
2430
2431	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2432	dev->base_addr = mem_mapped_addr[1];
2433
2434	mb();
2435	/* Set 2272 bus width (16 bits) and reset */
2436	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2437	udelay(200);
2438	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2439	/* Print fpga version number */
2440	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2441		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2442	/* Enable FPGA Interrupts */
2443	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2444
2445	return 0;
2446
2447 err:
2448	while (--i >= 0) {
2449		iounmap(mem_mapped_addr[i]);
2450		release_mem_region(pci_resource_start(pdev, i),
2451			pci_resource_len(pdev, i));
2452	}
2453
2454	return ret;
2455}
2456
2457static int
2458net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2459{
2460	struct net2272 *dev;
2461	int ret;
2462
2463	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2464	if (IS_ERR(dev))
2465		return PTR_ERR(dev);
2466	dev->dev_id = pdev->device;
2467
2468	if (pci_enable_device(pdev) < 0) {
2469		ret = -ENODEV;
2470		goto err_free;
2471	}
2472
2473	pci_set_master(pdev);
2474
2475	switch (pdev->device) {
2476	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2477	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2478	default: BUG();
2479	}
2480	if (ret)
2481		goto err_pci;
2482
2483	ret = net2272_probe_fin(dev, 0);
2484	if (ret)
2485		goto err_pci;
2486
2487	pci_set_drvdata(pdev, dev);
2488
2489	return 0;
2490
2491 err_pci:
2492	pci_disable_device(pdev);
2493 err_free:
2494	kfree(dev);
2495
2496	return ret;
2497}
2498
2499static void
2500net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2501{
2502	int i;
2503
2504	/* disable PLX 9054 interrupts */
2505	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2506		~(1 << PCI_INTERRUPT_ENABLE),
2507		dev->rdk1.plx9054_base_addr + INTCSR);
2508
2509	/* clean up resources allocated during probe() */
2510	iounmap(dev->rdk1.plx9054_base_addr);
2511	iounmap(dev->rdk1.epld_base_addr);
2512
2513	for (i = 0; i < 4; ++i) {
2514		if (i == 1)
2515			continue;	/* BAR1 unused */
2516		release_mem_region(pci_resource_start(pdev, i),
2517			pci_resource_len(pdev, i));
2518	}
2519}
2520
2521static void
2522net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2523{
2524	int i;
2525
2526	/* disable fpga interrupts
2527	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2528			~(1 << PCI_INTERRUPT_ENABLE),
2529			dev->rdk1.plx9054_base_addr + INTCSR);
2530	*/
2531
2532	/* clean up resources allocated during probe() */
2533	iounmap(dev->rdk2.fpga_base_addr);
2534
2535	for (i = 0; i < 2; ++i)
2536		release_mem_region(pci_resource_start(pdev, i),
2537			pci_resource_len(pdev, i));
2538}
2539
2540static void
2541net2272_pci_remove(struct pci_dev *pdev)
2542{
2543	struct net2272 *dev = pci_get_drvdata(pdev);
2544
2545	net2272_remove(dev);
2546
2547	switch (pdev->device) {
2548	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2549	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2550	default: BUG();
2551	}
2552
2553	pci_disable_device(pdev);
2554
2555	kfree(dev);
2556}
2557
2558/* Table of matching PCI IDs */
2559static struct pci_device_id pci_ids[] = {
2560	{	/* RDK 1 card */
2561		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562		.class_mask  = 0,
2563		.vendor      = PCI_VENDOR_ID_PLX,
2564		.device      = PCI_DEVICE_ID_RDK1,
2565		.subvendor   = PCI_ANY_ID,
2566		.subdevice   = PCI_ANY_ID,
2567	},
2568	{	/* RDK 2 card */
2569		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2570		.class_mask  = 0,
2571		.vendor      = PCI_VENDOR_ID_PLX,
2572		.device      = PCI_DEVICE_ID_RDK2,
2573		.subvendor   = PCI_ANY_ID,
2574		.subdevice   = PCI_ANY_ID,
2575	},
2576	{ }
2577};
2578MODULE_DEVICE_TABLE(pci, pci_ids);
2579
2580static struct pci_driver net2272_pci_driver = {
2581	.name     = driver_name,
2582	.id_table = pci_ids,
2583
2584	.probe    = net2272_pci_probe,
2585	.remove   = net2272_pci_remove,
2586};
2587
2588static int net2272_pci_register(void)
2589{
2590	return pci_register_driver(&net2272_pci_driver);
2591}
2592
2593static void net2272_pci_unregister(void)
2594{
2595	pci_unregister_driver(&net2272_pci_driver);
2596}
2597
2598#else
2599static inline int net2272_pci_register(void) { return 0; }
2600static inline void net2272_pci_unregister(void) { }
2601#endif
2602
2603/*---------------------------------------------------------------------------*/
2604
2605static int
2606net2272_plat_probe(struct platform_device *pdev)
2607{
2608	struct net2272 *dev;
2609	int ret;
2610	unsigned int irqflags;
2611	resource_size_t base, len;
2612	struct resource *iomem, *iomem_bus, *irq_res;
2613
2614	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2615	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2617	if (!irq_res || !iomem) {
2618		dev_err(&pdev->dev, "must provide irq/base addr");
2619		return -EINVAL;
2620	}
2621
2622	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2623	if (IS_ERR(dev))
2624		return PTR_ERR(dev);
2625
2626	irqflags = 0;
2627	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2628		irqflags |= IRQF_TRIGGER_RISING;
2629	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2630		irqflags |= IRQF_TRIGGER_FALLING;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2632		irqflags |= IRQF_TRIGGER_HIGH;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2634		irqflags |= IRQF_TRIGGER_LOW;
2635
2636	base = iomem->start;
2637	len = resource_size(iomem);
2638	if (iomem_bus)
2639		dev->base_shift = iomem_bus->start;
2640
2641	if (!request_mem_region(base, len, driver_name)) {
2642		dev_dbg(dev->dev, "get request memory region!\n");
2643		ret = -EBUSY;
2644		goto err;
2645	}
2646	dev->base_addr = ioremap_nocache(base, len);
2647	if (!dev->base_addr) {
2648		dev_dbg(dev->dev, "can't map memory\n");
2649		ret = -EFAULT;
2650		goto err_req;
2651	}
2652
2653	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2654	if (ret)
2655		goto err_io;
2656
2657	platform_set_drvdata(pdev, dev);
2658	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2659		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2660
2661	return 0;
2662
2663 err_io:
2664	iounmap(dev->base_addr);
2665 err_req:
2666	release_mem_region(base, len);
2667 err:
2668	return ret;
2669}
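/*
 * A minimal board-file sketch of the resources this probe expects; the
 * base address, window size and IRQ number below are placeholders, not
 * values from any real board, and the IORESOURCE_BUS entry used for
 * base_shift is optional:
 *
 *	static struct resource board_net2272_resources[] = {
 *		[0] = {
 *			.start = 0x20000000,
 *			.end   = 0x20000000 + 0xff,
 *			.flags = IORESOURCE_MEM,
 *		},
 *		[1] = {
 *			.start = 42,
 *			.end   = 42,
 *			.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
 *		},
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name          = "net2272",
 *		.id            = -1,
 *		.resource      = board_net2272_resources,
 *		.num_resources = ARRAY_SIZE(board_net2272_resources),
 *	};
 *
 * Registering it with platform_device_register(&board_net2272_device)
 * lets the platform driver below bind and run net2272_plat_probe().
 */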
2670
2671static int
2672net2272_plat_remove(struct platform_device *pdev)
2673{
2674	struct net2272 *dev = platform_get_drvdata(pdev);
2675
2676	net2272_remove(dev);
2677
2678	release_mem_region(pdev->resource[0].start,
2679		resource_size(&pdev->resource[0]));
2680
2681	kfree(dev);
2682
2683	return 0;
2684}
2685
2686static struct platform_driver net2272_plat_driver = {
2687	.probe   = net2272_plat_probe,
2688	.remove  = net2272_plat_remove,
2689	.driver  = {
2690		.name  = driver_name,
2691	},
2692	/* FIXME .suspend, .resume */
2693};
2694MODULE_ALIAS("platform:net2272");
2695
2696static int __init net2272_init(void)
2697{
2698	int ret;
2699
2700	ret = net2272_pci_register();
2701	if (ret)
2702		return ret;
2703	ret = platform_driver_register(&net2272_plat_driver);
2704	if (ret)
2705		goto err_pci;
2706	return ret;
2707
2708err_pci:
2709	net2272_pci_unregister();
2710	return ret;
2711}
2712module_init(net2272_init);
2713
2714static void __exit net2272_cleanup(void)
2715{
2716	net2272_pci_unregister();
2717	platform_driver_unregister(&net2272_plat_driver);
2718}
2719module_exit(net2272_cleanup);
2720
2721MODULE_DESCRIPTION(DRIVER_DESC);
2722MODULE_AUTHOR("PLX Technology, Inc.");
2723MODULE_LICENSE("GPL");