   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Driver for PLX NET2272 USB device controller
   4 *
   5 * Copyright (C) 2005-2006 PLX Technology, Inc.
   6 * Copyright (C) 2006-2011 Analog Devices, Inc.
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/device.h>
  11#include <linux/errno.h>
  12#include <linux/init.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/ioport.h>
  16#include <linux/kernel.h>
  17#include <linux/list.h>
  18#include <linux/module.h>
  19#include <linux/moduleparam.h>
  20#include <linux/pci.h>
  21#include <linux/platform_device.h>
  22#include <linux/prefetch.h>
  23#include <linux/sched.h>
  24#include <linux/slab.h>
  25#include <linux/timer.h>
  26#include <linux/usb.h>
  27#include <linux/usb/ch9.h>
  28#include <linux/usb/gadget.h>
  29
  30#include <asm/byteorder.h>
  31#include <asm/unaligned.h>
  32
  33#include "net2272.h"
  34
  35#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
  36
  37static const char driver_name[] = "net2272";
  38static const char driver_vers[] = "2006 October 17/mainline";
  39static const char driver_desc[] = DRIVER_DESC;
  40
  41static const char ep0name[] = "ep0";
  42static const char * const ep_name[] = {
  43	ep0name,
  44	"ep-a", "ep-b", "ep-c",
  45};
  46
  47#ifdef CONFIG_USB_NET2272_DMA
  48/*
  49 * use_dma: the NET2272 can use an external DMA controller.
  50 * Note that since there is no generic DMA api, some functions,
  51 * notably request_dma, start_dma, and cancel_dma will need to be
  52 * modified for your platform's particular dma controller.
  53 *
  54 * If use_dma is disabled, pio will be used instead.
  55 */
  56static bool use_dma = false;
  57module_param(use_dma, bool, 0644);
  58
  59/*
  60 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
  61 * The NET2272 can only use dma for a single endpoint at a time.
  62 * At some point this could be modified to allow either endpoint
  63 * to take control of dma as it becomes available.
  64 *
  65 * Note that DMA should not be used on OUT endpoints unless it can
  66 * be guaranteed that no short packets will arrive on an IN endpoint
  67 * while the DMA operation is pending.  Otherwise the OUT DMA will
  68 * terminate prematurely (See NET2272 Errata 630-0213-0101)
  69 */
  70static ushort dma_ep = 1;
  71module_param(dma_ep, ushort, 0644);
  72
  73/*
  74 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
  75 *	mode 0 == Slow DREQ mode
  76 *	mode 1 == Fast DREQ mode
  77 *	mode 2 == Burst mode
  78 */
  79static ushort dma_mode = 2;
  80module_param(dma_mode, ushort, 0644);
  81#else
  82#define use_dma 0
  83#define dma_ep 1
  84#define dma_mode 2
  85#endif
  86
  87/*
  88 * fifo_mode: net2272 buffer configuration:
  89 *      mode 0 == ep-{a,b,c} 512db each
  90 *      mode 1 == ep-a 1k, ep-{b,c} 512db
  91 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
  92 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
  93 */
  94static ushort fifo_mode;
  95module_param(fifo_mode, ushort, 0644);
  96
  97/*
  98 * enable_suspend: When enabled, the driver will respond to
  99 * USB suspend requests by powering down the NET2272.  Otherwise,
 100 * USB suspend requests will be ignored.  This is acceptable for
 101 * self-powered devices.  For bus powered devices set this to 1.
 102 */
 103static ushort enable_suspend;
 104module_param(enable_suspend, ushort, 0644);
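/*
 * Illustrative only: when the driver is built as a module, these knobs are
 * ordinary module parameters and can be set at load time, e.g.
 *
 *	modprobe net2272 fifo_mode=1 enable_suspend=1
 *
 * (use_dma, dma_ep, and dma_mode only exist when CONFIG_USB_NET2272_DMA is
 * enabled).  Since each is registered with mode 0644, they are also
 * adjustable afterwards under /sys/module/net2272/parameters/.
 */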
 105
 106static void assert_out_naking(struct net2272_ep *ep, const char *where)
 107{
 108	u8 tmp;
 109
 110#ifndef DEBUG
 111	return;
 112#endif
 113
 114	tmp = net2272_ep_read(ep, EP_STAT0);
 115	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 116		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 117			ep->ep.name, where, tmp);
 118		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 119	}
 120}
 121#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 122
 123static void stop_out_naking(struct net2272_ep *ep)
 124{
 125	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 126
 127	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 128		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 129}
 130
 131#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 132
 133static char *type_string(u8 bmAttributes)
 134{
 135	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 136	case USB_ENDPOINT_XFER_BULK: return "bulk";
 137	case USB_ENDPOINT_XFER_ISOC: return "iso";
 138	case USB_ENDPOINT_XFER_INT:  return "intr";
 139	default:                     return "control";
 140	}
 141}
 142
 143static char *buf_state_string(unsigned state)
 144{
 145	switch (state) {
 146	case BUFF_FREE:  return "free";
 147	case BUFF_VALID: return "valid";
 148	case BUFF_LCL:   return "local";
 149	case BUFF_USB:   return "usb";
 150	default:         return "unknown";
 151	}
 152}
 153
 154static char *dma_mode_string(void)
 155{
 156	if (!use_dma)
 157		return "PIO";
 158	switch (dma_mode) {
 159	case 0:  return "SLOW DREQ";
 160	case 1:  return "FAST DREQ";
 161	case 2:  return "BURST";
 162	default: return "invalid";
 163	}
 164}
 165
 166static void net2272_dequeue_all(struct net2272_ep *);
 167static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 168static int net2272_fifo_status(struct usb_ep *);
 169
 170static const struct usb_ep_ops net2272_ep_ops;
 171
 172/*---------------------------------------------------------------------------*/
 173
 174static int
 175net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 176{
 177	struct net2272 *dev;
 178	struct net2272_ep *ep;
 179	u32 max;
 180	u8 tmp;
 181	unsigned long flags;
 182
 183	ep = container_of(_ep, struct net2272_ep, ep);
 184	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 185			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 186		return -EINVAL;
 187	dev = ep->dev;
 188	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 189		return -ESHUTDOWN;
 190
 191	max = usb_endpoint_maxp(desc);
 192
 193	spin_lock_irqsave(&dev->lock, flags);
 194	_ep->maxpacket = max;
 195	ep->desc = desc;
 196
 197	/* net2272_ep_reset() has already been called */
 198	ep->stopped = 0;
 199	ep->wedged = 0;
 200
 201	/* set speed-dependent max packet */
 202	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 203	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 204
 205	/* set type, direction, address; reset fifo counters */
 206	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 207	tmp = usb_endpoint_type(desc);
 208	if (usb_endpoint_xfer_bulk(desc)) {
 209		/* catch some particularly blatant driver bugs */
 210		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 211		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 212			spin_unlock_irqrestore(&dev->lock, flags);
 213			return -ERANGE;
 214		}
 215	}
 216	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 217	tmp <<= ENDPOINT_TYPE;
 218	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 219	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 220	tmp |= (1 << ENDPOINT_ENABLE);
 221
 222	/* for OUT transfers, block the rx fifo until a read is posted */
 223	ep->is_in = usb_endpoint_dir_in(desc);
 224	if (!ep->is_in)
 225		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 226
 227	net2272_ep_write(ep, EP_CFG, tmp);
 228
 229	/* enable irqs */
 230	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 231	net2272_write(dev, IRQENB0, tmp);
 232
 233	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 234		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 235		| net2272_ep_read(ep, EP_IRQENB);
 236	net2272_ep_write(ep, EP_IRQENB, tmp);
 237
 238	tmp = desc->bEndpointAddress;
 239	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 240		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 241		type_string(desc->bmAttributes), max,
 242		net2272_ep_read(ep, EP_CFG));
 243
 244	spin_unlock_irqrestore(&dev->lock, flags);
 245	return 0;
 246}
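/*
 * Illustrative only (not part of this driver): a gadget function driver
 * normally reaches net2272_enable() through the usb_ep_enable() wrapper,
 * after picking the speed-appropriate descriptor.  The 'cdev', 'f', and
 * 'my_bulk_in_ep' names below are hypothetical.
 *
 *	ret = config_ep_by_speed(cdev->gadget, f, my_bulk_in_ep);
 *	if (ret)
 *		return ret;
 *	ret = usb_ep_enable(my_bulk_in_ep);	   (ends up in net2272_enable)
 */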
 247
 248static void net2272_ep_reset(struct net2272_ep *ep)
 249{
 250	u8 tmp;
 251
 252	ep->desc = NULL;
 253	INIT_LIST_HEAD(&ep->queue);
 254
 255	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 256	ep->ep.ops = &net2272_ep_ops;
 257
 258	/* disable irqs, endpoint */
 259	net2272_ep_write(ep, EP_IRQENB, 0);
 260
 261	/* init to our chosen defaults, notably so that we NAK OUT
 262	 * packets until the driver queues a read.
 263	 */
 264	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 265	net2272_ep_write(ep, EP_RSPSET, tmp);
 266
 267	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 268	if (ep->num != 0)
 269		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 270
 271	net2272_ep_write(ep, EP_RSPCLR, tmp);
 272
 273	/* scrub most status bits, and flush any fifo state */
 274	net2272_ep_write(ep, EP_STAT0,
 275			  (1 << DATA_IN_TOKEN_INTERRUPT)
 276			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 277			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 278			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 279			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 280
 281	net2272_ep_write(ep, EP_STAT1,
 282			    (1 << TIMEOUT)
 283			  | (1 << USB_OUT_ACK_SENT)
 284			  | (1 << USB_OUT_NAK_SENT)
 285			  | (1 << USB_IN_ACK_RCVD)
 286			  | (1 << USB_IN_NAK_SENT)
 287			  | (1 << USB_STALL_SENT)
 288			  | (1 << LOCAL_OUT_ZLP)
 289			  | (1 << BUFFER_FLUSH));
 290
 291	/* fifo size is handled separately */
 292}
 293
 294static int net2272_disable(struct usb_ep *_ep)
 295{
 296	struct net2272_ep *ep;
 297	unsigned long flags;
 298
 299	ep = container_of(_ep, struct net2272_ep, ep);
 300	if (!_ep || !ep->desc || _ep->name == ep0name)
 301		return -EINVAL;
 302
 303	spin_lock_irqsave(&ep->dev->lock, flags);
 304	net2272_dequeue_all(ep);
 305	net2272_ep_reset(ep);
 306
 307	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 308
 309	spin_unlock_irqrestore(&ep->dev->lock, flags);
 310	return 0;
 311}
 312
 313/*---------------------------------------------------------------------------*/
 314
 315static struct usb_request *
 316net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 317{
 318	struct net2272_request *req;
 319
 320	if (!_ep)
 321		return NULL;
 322
 323	req = kzalloc(sizeof(*req), gfp_flags);
 324	if (!req)
 325		return NULL;
 326
 327	INIT_LIST_HEAD(&req->queue);
 328
 329	return &req->req;
 330}
 331
 332static void
 333net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 334{
 335	struct net2272_request *req;
 336
 337	if (!_ep || !_req)
 338		return;
 339
 340	req = container_of(_req, struct net2272_request, req);
 341	WARN_ON(!list_empty(&req->queue));
 342	kfree(req);
 343}
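/*
 * Illustrative only: gadget drivers normally don't call the two helpers
 * above directly; they use the generic wrappers, which dispatch through
 * net2272_ep_ops:
 *
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);   (net2272_alloc_request)
 *	...
 *	usb_ep_free_request(ep, req);                 (net2272_free_request)
 */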
 344
 345static void
 346net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 347{
 348	struct net2272 *dev;
 349	unsigned stopped = ep->stopped;
 350
 351	if (ep->num == 0) {
 352		if (ep->dev->protocol_stall) {
 353			ep->stopped = 1;
 354			set_halt(ep);
 355		}
 356		allow_status(ep);
 357	}
 358
 359	list_del_init(&req->queue);
 360
 361	if (req->req.status == -EINPROGRESS)
 362		req->req.status = status;
 363	else
 364		status = req->req.status;
 365
 366	dev = ep->dev;
 367	if (use_dma && ep->dma)
 368		usb_gadget_unmap_request(&dev->gadget, &req->req,
 369				ep->is_in);
 370
 371	if (status && status != -ESHUTDOWN)
 372		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 373			ep->ep.name, &req->req, status,
 374			req->req.actual, req->req.length, req->req.buf);
 375
 376	/* don't modify queue heads during completion callback */
 377	ep->stopped = 1;
 378	spin_unlock(&dev->lock);
 379	usb_gadget_giveback_request(&ep->ep, &req->req);
 380	spin_lock(&dev->lock);
 381	ep->stopped = stopped;
 382}
 383
 384static int
 385net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 386	struct net2272_request *req, unsigned max)
 387{
 388	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 389	u16 *bufp;
 390	unsigned length, count;
 391	u8 tmp;
 392
 393	length = min(req->req.length - req->req.actual, max);
 394	req->req.actual += length;
 395
 396	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 397		ep->ep.name, req, max, length,
 398		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 399
 400	count = length;
 401	bufp = (u16 *)buf;
 402
 403	while (likely(count >= 2)) {
 404		/* no byte-swap required; chip endian set during init */
 405		writew(*bufp++, ep_data);
 406		count -= 2;
 407	}
 408	buf = (u8 *)bufp;
 409
 410	/* write final byte by placing the NET2272 into 8-bit mode */
 411	if (unlikely(count)) {
 412		tmp = net2272_read(ep->dev, LOCCTL);
 413		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 414		writeb(*buf, ep_data);
 415		net2272_write(ep->dev, LOCCTL, tmp);
 416	}
 417	return length;
 418}
 419
 420/* returns: 0: still running, 1: completed, negative: errno */
 421static int
 422net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 423{
 424	u8 *buf;
 425	unsigned count, max;
 426	int status;
 427
 428	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 429		ep->ep.name, req->req.actual, req->req.length);
 430
 431	/*
 432	 * Keep loading the endpoint until the final packet is loaded,
 433	 * or the endpoint buffer is full.
 434	 */
 435 top:
 436	/*
 437	 * Clear interrupt status
 438	 *  - Packet Transmitted interrupt will become set again when the
 439	 *    host successfully takes another packet
 440	 */
 441	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 442	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 443		buf = req->req.buf + req->req.actual;
 444		prefetch(buf);
 445
 446		/* force pagesel */
 447		net2272_ep_read(ep, EP_STAT0);
 448
 449		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 450			(net2272_ep_read(ep, EP_AVAIL0));
 451
 452		if (max < ep->ep.maxpacket)
 453			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 454				| (net2272_ep_read(ep, EP_AVAIL0));
 455
 456		count = net2272_write_packet(ep, buf, req, max);
 457		/* see if we are done */
 458		if (req->req.length == req->req.actual) {
 459			/* validate short or zlp packet */
 460			if (count < ep->ep.maxpacket)
 461				set_fifo_bytecount(ep, 0);
 462			net2272_done(ep, req, 0);
 463
 464			if (!list_empty(&ep->queue)) {
 465				req = list_entry(ep->queue.next,
 466						struct net2272_request,
 467						queue);
 468				status = net2272_kick_dma(ep, req);
 469
 470				if (status < 0)
 471					if ((net2272_ep_read(ep, EP_STAT0)
 472							& (1 << BUFFER_EMPTY)))
 473						goto top;
 474			}
 475			return 1;
 476		}
 477		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 478	}
 479	return 0;
 480}
 481
 482static void
 483net2272_out_flush(struct net2272_ep *ep)
 484{
 485	ASSERT_OUT_NAKING(ep);
 486
 487	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 488			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 489	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 490}
 491
 492static int
 493net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 494	struct net2272_request *req, unsigned avail)
 495{
 496	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 497	unsigned is_short;
 498	u16 *bufp;
 499
 500	req->req.actual += avail;
 501
 502	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 503		ep->ep.name, req, avail,
 504		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 505
 506	is_short = (avail < ep->ep.maxpacket);
 507
 508	if (unlikely(avail == 0)) {
 509		/* remove any zlp from the buffer */
 510		(void)readw(ep_data);
 511		return is_short;
 512	}
 513
 514	/* Ensure we get the final byte */
 515	if (unlikely(avail % 2))
 516		avail++;
 517	bufp = (u16 *)buf;
 518
 519	do {
 520		*bufp++ = readw(ep_data);
 521		avail -= 2;
 522	} while (avail);
 523
 524	/*
  525	 * To avoid a false endpoint-available race condition, EP_STAT0 must be
  526	 * read twice in the case of a short transfer
 527	 */
 528	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 529		net2272_ep_read(ep, EP_STAT0);
 530
 531	return is_short;
 532}
 533
 534static int
 535net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 536{
 537	u8 *buf;
 538	unsigned is_short;
 539	int count;
 540	int tmp;
 541	int cleanup = 0;
 542
 543	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 544		ep->ep.name, req->req.actual, req->req.length);
 545
 546 top:
 547	do {
 548		buf = req->req.buf + req->req.actual;
 549		prefetchw(buf);
 550
 551		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 552			| net2272_ep_read(ep, EP_AVAIL0);
 553
 554		net2272_ep_write(ep, EP_STAT0,
 555			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 556			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 557
 558		tmp = req->req.length - req->req.actual;
 559
 560		if (count > tmp) {
 561			if ((tmp % ep->ep.maxpacket) != 0) {
 562				dev_err(ep->dev->dev,
 563					"%s out fifo %d bytes, expected %d\n",
 564					ep->ep.name, count, tmp);
 565				cleanup = 1;
 566			}
 567			count = (tmp > 0) ? tmp : 0;
 568		}
 569
 570		is_short = net2272_read_packet(ep, buf, req, count);
 571
 572		/* completion */
 573		if (unlikely(cleanup || is_short ||
 574				req->req.actual == req->req.length)) {
 575
 576			if (cleanup) {
 577				net2272_out_flush(ep);
 578				net2272_done(ep, req, -EOVERFLOW);
 579			} else
 580				net2272_done(ep, req, 0);
 581
 582			/* re-initialize endpoint transfer registers
 583			 * otherwise they may result in erroneous pre-validation
 584			 * for subsequent control reads
 585			 */
 586			if (unlikely(ep->num == 0)) {
 587				net2272_ep_write(ep, EP_TRANSFER2, 0);
 588				net2272_ep_write(ep, EP_TRANSFER1, 0);
 589				net2272_ep_write(ep, EP_TRANSFER0, 0);
 590			}
 591
 592			if (!list_empty(&ep->queue)) {
 593				int status;
 594
 595				req = list_entry(ep->queue.next,
 596					struct net2272_request, queue);
 597				status = net2272_kick_dma(ep, req);
 598				if ((status < 0) &&
 599				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 600					goto top;
 601			}
 602			return 1;
 603		}
 604	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 605
 606	return 0;
 607}
 608
 609static void
 610net2272_pio_advance(struct net2272_ep *ep)
 611{
 612	struct net2272_request *req;
 613
 614	if (unlikely(list_empty(&ep->queue)))
 615		return;
 616
 617	req = list_entry(ep->queue.next, struct net2272_request, queue);
 618	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 619}
 620
 621/* returns 0 on success, else negative errno */
 622static int
 623net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 624	unsigned len, unsigned dir)
 625{
 626	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 627		ep, buf, len, dir);
 628
 629	/* The NET2272 only supports a single dma channel */
 630	if (dev->dma_busy)
 631		return -EBUSY;
 632	/*
 633	 * EP_TRANSFER (used to determine the number of bytes received
 634	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 635	 */
 636	if ((dir == 1) && (len > 0x1000000))
 637		return -EINVAL;
 638
 639	dev->dma_busy = 1;
 640
 641	/* initialize platform's dma */
 642#ifdef CONFIG_USB_PCI
 643	/* NET2272 addr, buffer addr, length, etc. */
 644	switch (dev->dev_id) {
 645	case PCI_DEVICE_ID_RDK1:
 646		/* Setup PLX 9054 DMA mode */
 647		writel((1 << LOCAL_BUS_WIDTH) |
 648			(1 << TA_READY_INPUT_ENABLE) |
 649			(0 << LOCAL_BURST_ENABLE) |
 650			(1 << DONE_INTERRUPT_ENABLE) |
 651			(1 << LOCAL_ADDRESSING_MODE) |
 652			(1 << DEMAND_MODE) |
 653			(1 << DMA_EOT_ENABLE) |
 654			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 655			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 656			dev->rdk1.plx9054_base_addr + DMAMODE0);
 657
 658		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 659		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 660		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 661		writel((dir << DIRECTION_OF_TRANSFER) |
 662			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 663			dev->rdk1.plx9054_base_addr + DMADPR0);
 664		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 665			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 666			dev->rdk1.plx9054_base_addr + INTCSR);
 667
 668		break;
 669	}
 670#endif
 671
 672	net2272_write(dev, DMAREQ,
 673		(0 << DMA_BUFFER_VALID) |
 674		(1 << DMA_REQUEST_ENABLE) |
 675		(1 << DMA_CONTROL_DACK) |
 676		(dev->dma_eot_polarity << EOT_POLARITY) |
 677		(dev->dma_dack_polarity << DACK_POLARITY) |
 678		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 679		((ep >> 1) << DMA_ENDPOINT_SELECT));
 680
 681	(void) net2272_read(dev, SCRATCH);
 682
 683	return 0;
 684}
 685
 686static void
 687net2272_start_dma(struct net2272 *dev)
 688{
 689	/* start platform's dma controller */
 690#ifdef CONFIG_USB_PCI
 691	switch (dev->dev_id) {
 692	case PCI_DEVICE_ID_RDK1:
 693		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 694			dev->rdk1.plx9054_base_addr + DMACSR0);
 695		break;
 696	}
 697#endif
 698}
 699
 700/* returns 0 on success, else negative errno */
 701static int
 702net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 703{
 704	unsigned size;
 705	u8 tmp;
 706
 707	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 708		return -EINVAL;
 709
 710	/* don't use dma for odd-length transfers
 711	 * otherwise, we'd need to deal with the last byte with pio
 712	 */
 713	if (req->req.length & 1)
 714		return -EINVAL;
 715
 716	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 717		ep->ep.name, req, (unsigned long long) req->req.dma);
 718
 719	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 720
 721	/* The NET2272 can only use DMA on one endpoint at a time */
 722	if (ep->dev->dma_busy)
 723		return -EBUSY;
 724
 725	/* Make sure we only DMA an even number of bytes (we'll use
 726	 * pio to complete the transfer)
 727	 */
 728	size = req->req.length;
 729	size &= ~1;
 730
 731	/* device-to-host transfer */
 732	if (ep->is_in) {
 733		/* initialize platform's dma controller */
 734		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 735			/* unable to obtain DMA channel; return error and use pio mode */
 736			return -EBUSY;
 737		req->req.actual += size;
 738
 739	/* host-to-device transfer */
 740	} else {
 741		tmp = net2272_ep_read(ep, EP_STAT0);
 742
 743		/* initialize platform's dma controller */
 744		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 745			/* unable to obtain DMA channel; return error and use pio mode */
 746			return -EBUSY;
 747
 748		if (!(tmp & (1 << BUFFER_EMPTY)))
 749			ep->not_empty = 1;
 750		else
 751			ep->not_empty = 0;
 752
 753
 754		/* allow the endpoint's buffer to fill */
 755		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 756
 757		/* this transfer completed and data's already in the fifo
 758		 * return error so pio gets used.
 759		 */
 760		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 761
 762			/* deassert dreq */
 763			net2272_write(ep->dev, DMAREQ,
 764				(0 << DMA_BUFFER_VALID) |
 765				(0 << DMA_REQUEST_ENABLE) |
 766				(1 << DMA_CONTROL_DACK) |
 767				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 768				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 769				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 770				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 771
 772			return -EBUSY;
 773		}
 774	}
 775
 776	/* Don't use per-packet interrupts: use dma interrupts only */
 777	net2272_ep_write(ep, EP_IRQENB, 0);
 778
 779	net2272_start_dma(ep->dev);
 780
 781	return 0;
 782}
 783
 784static void net2272_cancel_dma(struct net2272 *dev)
 785{
 786#ifdef CONFIG_USB_PCI
 787	switch (dev->dev_id) {
 788	case PCI_DEVICE_ID_RDK1:
 789		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 790		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 791		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 792		         (1 << CHANNEL_DONE)))
  793			continue;	/* wait for dma to stabilize */
 794
 795		/* dma abort generates an interrupt */
 796		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 797			dev->rdk1.plx9054_base_addr + DMACSR0);
 798		break;
 799	}
 800#endif
 801
 802	dev->dma_busy = 0;
 803}
 804
 805/*---------------------------------------------------------------------------*/
 806
 807static int
 808net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 809{
 810	struct net2272_request *req;
 811	struct net2272_ep *ep;
 812	struct net2272 *dev;
 813	unsigned long flags;
 814	int status = -1;
 815	u8 s;
 816
 817	req = container_of(_req, struct net2272_request, req);
 818	if (!_req || !_req->complete || !_req->buf
 819			|| !list_empty(&req->queue))
 820		return -EINVAL;
 821	ep = container_of(_ep, struct net2272_ep, ep);
 822	if (!_ep || (!ep->desc && ep->num != 0))
 823		return -EINVAL;
 824	dev = ep->dev;
 825	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 826		return -ESHUTDOWN;
 827
 828	/* set up dma mapping in case the caller didn't */
 829	if (use_dma && ep->dma) {
 830		status = usb_gadget_map_request(&dev->gadget, _req,
 831				ep->is_in);
 832		if (status)
 833			return status;
 834	}
 835
 836	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 837		_ep->name, _req, _req->length, _req->buf,
 838		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 839
 840	spin_lock_irqsave(&dev->lock, flags);
 841
 842	_req->status = -EINPROGRESS;
 843	_req->actual = 0;
 844
 845	/* kickstart this i/o queue? */
 846	if (list_empty(&ep->queue) && !ep->stopped) {
 847		/* maybe there's no control data, just status ack */
 848		if (ep->num == 0 && _req->length == 0) {
 849			net2272_done(ep, req, 0);
 850			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 851			goto done;
 852		}
 853
 854		/* Return zlp, don't let it block subsequent packets */
 855		s = net2272_ep_read(ep, EP_STAT0);
 856		if (s & (1 << BUFFER_EMPTY)) {
  857			/* Buffer is empty; check for a blocking zlp and handle it */
 858			if ((s & (1 << NAK_OUT_PACKETS)) &&
 859			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 860				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 861				/*
 862				 * Request is going to terminate with a short packet ...
 863				 * hope the client is ready for it!
 864				 */
 865				status = net2272_read_fifo(ep, req);
 866				/* clear short packet naking */
 867				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 868				goto done;
 869			}
 870		}
 871
 872		/* try dma first */
 873		status = net2272_kick_dma(ep, req);
 874
 875		if (status < 0) {
 876			/* dma failed (most likely in use by another endpoint)
 877			 * fallback to pio
 878			 */
 879			status = 0;
 880
 881			if (ep->is_in)
 882				status = net2272_write_fifo(ep, req);
 883			else {
 884				s = net2272_ep_read(ep, EP_STAT0);
 885				if ((s & (1 << BUFFER_EMPTY)) == 0)
 886					status = net2272_read_fifo(ep, req);
 887			}
 888
 889			if (unlikely(status != 0)) {
 890				if (status > 0)
 891					status = 0;
 892				req = NULL;
 893			}
 894		}
 895	}
 896	if (likely(req))
 897		list_add_tail(&req->queue, &ep->queue);
 898
 899	if (likely(!list_empty(&ep->queue)))
 900		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 901 done:
 902	spin_unlock_irqrestore(&dev->lock, flags);
 903
 904	return 0;
 905}
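/*
 * Illustrative only: how a request typically arrives here from a gadget
 * function driver.  The buffer, length, and completion-handler names are
 * hypothetical; usb_ep_queue() dispatches to net2272_queue() through the
 * endpoint ops installed in net2272_ep_reset().
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf      = my_buf;
 *	req->length   = my_len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * On completion (success, error, or dequeue) net2272_done() calls
 * my_complete(ep, req) with the device lock dropped.
 */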
 906
 907/* dequeue ALL requests */
 908static void
 909net2272_dequeue_all(struct net2272_ep *ep)
 910{
 911	struct net2272_request *req;
 912
 913	/* called with spinlock held */
 914	ep->stopped = 1;
 915
 916	while (!list_empty(&ep->queue)) {
 917		req = list_entry(ep->queue.next,
 918				struct net2272_request,
 919				queue);
 920		net2272_done(ep, req, -ESHUTDOWN);
 921	}
 922}
 923
 924/* dequeue JUST ONE request */
 925static int
 926net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 927{
 928	struct net2272_ep *ep;
 929	struct net2272_request *req = NULL, *iter;
 930	unsigned long flags;
 931	int stopped;
 932
 933	ep = container_of(_ep, struct net2272_ep, ep);
 934	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 935		return -EINVAL;
 936
 937	spin_lock_irqsave(&ep->dev->lock, flags);
 938	stopped = ep->stopped;
 939	ep->stopped = 1;
 940
 941	/* make sure it's still queued on this endpoint */
 942	list_for_each_entry(iter, &ep->queue, queue) {
 943		if (&iter->req != _req)
 944			continue;
 945		req = iter;
 946		break;
 947	}
 948	if (!req) {
 949		ep->stopped = stopped;
 950		spin_unlock_irqrestore(&ep->dev->lock, flags);
 951		return -EINVAL;
 952	}
 953
 954	/* queue head may be partially complete */
 955	if (ep->queue.next == &req->queue) {
 956		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 957		net2272_done(ep, req, -ECONNRESET);
 958	}
 959	ep->stopped = stopped;
 960
 961	spin_unlock_irqrestore(&ep->dev->lock, flags);
 962	return 0;
 963}
 964
 965/*---------------------------------------------------------------------------*/
 966
 967static int
 968net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 969{
 970	struct net2272_ep *ep;
 971	unsigned long flags;
 972	int ret = 0;
 973
 974	ep = container_of(_ep, struct net2272_ep, ep);
 975	if (!_ep || (!ep->desc && ep->num != 0))
 976		return -EINVAL;
 977	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 978		return -ESHUTDOWN;
 979	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 980		return -EINVAL;
 981
 982	spin_lock_irqsave(&ep->dev->lock, flags);
 983	if (!list_empty(&ep->queue))
 984		ret = -EAGAIN;
 985	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
 986		ret = -EAGAIN;
 987	else {
 988		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
 989			value ? "set" : "clear",
 990			wedged ? "wedge" : "halt");
 991		/* set/clear */
 992		if (value) {
 993			if (ep->num == 0)
 994				ep->dev->protocol_stall = 1;
 995			else
 996				set_halt(ep);
 997			if (wedged)
 998				ep->wedged = 1;
 999		} else {
1000			clear_halt(ep);
1001			ep->wedged = 0;
1002		}
1003	}
1004	spin_unlock_irqrestore(&ep->dev->lock, flags);
1005
1006	return ret;
1007}
1008
1009static int
1010net2272_set_halt(struct usb_ep *_ep, int value)
1011{
1012	return net2272_set_halt_and_wedge(_ep, value, 0);
1013}
1014
1015static int
1016net2272_set_wedge(struct usb_ep *_ep)
1017{
1018	if (!_ep || _ep->name == ep0name)
1019		return -EINVAL;
1020	return net2272_set_halt_and_wedge(_ep, 1, 1);
1021}
1022
1023static int
1024net2272_fifo_status(struct usb_ep *_ep)
1025{
1026	struct net2272_ep *ep;
1027	u16 avail;
1028
1029	ep = container_of(_ep, struct net2272_ep, ep);
1030	if (!_ep || (!ep->desc && ep->num != 0))
1031		return -ENODEV;
1032	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1033		return -ESHUTDOWN;
1034
1035	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1036	avail |= net2272_ep_read(ep, EP_AVAIL0);
1037	if (avail > ep->fifo_size)
1038		return -EOVERFLOW;
1039	if (ep->is_in)
1040		avail = ep->fifo_size - avail;
1041	return avail;
1042}
1043
1044static void
1045net2272_fifo_flush(struct usb_ep *_ep)
1046{
1047	struct net2272_ep *ep;
1048
1049	ep = container_of(_ep, struct net2272_ep, ep);
1050	if (!_ep || (!ep->desc && ep->num != 0))
1051		return;
1052	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1053		return;
1054
1055	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1056}
1057
1058static const struct usb_ep_ops net2272_ep_ops = {
1059	.enable        = net2272_enable,
1060	.disable       = net2272_disable,
1061
1062	.alloc_request = net2272_alloc_request,
1063	.free_request  = net2272_free_request,
1064
1065	.queue         = net2272_queue,
1066	.dequeue       = net2272_dequeue,
1067
1068	.set_halt      = net2272_set_halt,
1069	.set_wedge     = net2272_set_wedge,
1070	.fifo_status   = net2272_fifo_status,
1071	.fifo_flush    = net2272_fifo_flush,
1072};
1073
1074/*---------------------------------------------------------------------------*/
1075
1076static int
1077net2272_get_frame(struct usb_gadget *_gadget)
1078{
1079	struct net2272 *dev;
1080	unsigned long flags;
1081	u16 ret;
1082
1083	if (!_gadget)
1084		return -ENODEV;
1085	dev = container_of(_gadget, struct net2272, gadget);
1086	spin_lock_irqsave(&dev->lock, flags);
1087
1088	ret = net2272_read(dev, FRAME1) << 8;
1089	ret |= net2272_read(dev, FRAME0);
1090
1091	spin_unlock_irqrestore(&dev->lock, flags);
1092	return ret;
1093}
1094
1095static int
1096net2272_wakeup(struct usb_gadget *_gadget)
1097{
1098	struct net2272 *dev;
1099	u8 tmp;
1100	unsigned long flags;
1101
1102	if (!_gadget)
1103		return 0;
1104	dev = container_of(_gadget, struct net2272, gadget);
1105
1106	spin_lock_irqsave(&dev->lock, flags);
1107	tmp = net2272_read(dev, USBCTL0);
1108	if (tmp & (1 << IO_WAKEUP_ENABLE))
1109		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1110
1111	spin_unlock_irqrestore(&dev->lock, flags);
1112
1113	return 0;
1114}
1115
1116static int
1117net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1118{
1119	if (!_gadget)
1120		return -ENODEV;
1121
1122	_gadget->is_selfpowered = (value != 0);
1123
1124	return 0;
1125}
1126
1127static int
1128net2272_pullup(struct usb_gadget *_gadget, int is_on)
1129{
1130	struct net2272 *dev;
1131	u8 tmp;
1132	unsigned long flags;
1133
1134	if (!_gadget)
1135		return -ENODEV;
1136	dev = container_of(_gadget, struct net2272, gadget);
1137
1138	spin_lock_irqsave(&dev->lock, flags);
1139	tmp = net2272_read(dev, USBCTL0);
1140	dev->softconnect = (is_on != 0);
1141	if (is_on)
1142		tmp |= (1 << USB_DETECT_ENABLE);
1143	else
1144		tmp &= ~(1 << USB_DETECT_ENABLE);
1145	net2272_write(dev, USBCTL0, tmp);
1146	spin_unlock_irqrestore(&dev->lock, flags);
1147
1148	return 0;
1149}
1150
1151static int net2272_start(struct usb_gadget *_gadget,
1152		struct usb_gadget_driver *driver);
1153static int net2272_stop(struct usb_gadget *_gadget);
1154static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
1155
1156static const struct usb_gadget_ops net2272_ops = {
1157	.get_frame	= net2272_get_frame,
1158	.wakeup		= net2272_wakeup,
1159	.set_selfpowered = net2272_set_selfpowered,
1160	.pullup		= net2272_pullup,
1161	.udc_start	= net2272_start,
1162	.udc_stop	= net2272_stop,
1163	.udc_async_callbacks = net2272_async_callbacks,
1164};
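/*
 * Illustrative only: the UDC core reaches these operations through the
 * usb_gadget_* wrappers, e.g. usb_gadget_connect() ends up in
 * net2272_pullup(gadget, 1) and usb_gadget_wakeup() in net2272_wakeup(),
 * while udc_start/udc_stop run when a function driver binds to or unbinds
 * from this controller.
 */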
1165
1166/*---------------------------------------------------------------------------*/
1167
1168static ssize_t
1169registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1170{
1171	struct net2272 *dev;
1172	char *next;
1173	unsigned size, t;
1174	unsigned long flags;
1175	u8 t1, t2;
1176	int i;
1177	const char *s;
1178
1179	dev = dev_get_drvdata(_dev);
1180	next = buf;
1181	size = PAGE_SIZE;
1182	spin_lock_irqsave(&dev->lock, flags);
1183
1184	/* Main Control Registers */
1185	t = scnprintf(next, size, "%s version %s,"
1186		"chiprev %02x, locctl %02x\n"
1187		"irqenb0 %02x irqenb1 %02x "
1188		"irqstat0 %02x irqstat1 %02x\n",
1189		driver_name, driver_vers, dev->chiprev,
1190		net2272_read(dev, LOCCTL),
1191		net2272_read(dev, IRQENB0),
1192		net2272_read(dev, IRQENB1),
1193		net2272_read(dev, IRQSTAT0),
1194		net2272_read(dev, IRQSTAT1));
1195	size -= t;
1196	next += t;
1197
1198	/* DMA */
1199	t1 = net2272_read(dev, DMAREQ);
1200	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1201		t1, ep_name[(t1 & 0x01) + 1],
1202		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1203		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1204		t1 & (1 << DMA_REQUEST) ? "req " : "",
1205		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1206	size -= t;
1207	next += t;
1208
1209	/* USB Control Registers */
1210	t1 = net2272_read(dev, USBCTL1);
1211	if (t1 & (1 << VBUS_PIN)) {
1212		if (t1 & (1 << USB_HIGH_SPEED))
1213			s = "high speed";
1214		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1215			s = "powered";
1216		else
1217			s = "full speed";
1218	} else
1219		s = "not attached";
1220	t = scnprintf(next, size,
1221		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1222		net2272_read(dev, USBCTL0), t1,
1223		net2272_read(dev, OURADDR), s);
1224	size -= t;
1225	next += t;
1226
1227	/* Endpoint Registers */
1228	for (i = 0; i < 4; ++i) {
1229		struct net2272_ep *ep;
1230
1231		ep = &dev->ep[i];
1232		if (i && !ep->desc)
1233			continue;
1234
1235		t1 = net2272_ep_read(ep, EP_CFG);
1236		t2 = net2272_ep_read(ep, EP_RSPSET);
1237		t = scnprintf(next, size,
1238			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1239			"irqenb %02x\n",
1240			ep->ep.name, t1, t2,
1241			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1242			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1243			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1244			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1245			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1246			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1247			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1248			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1249			net2272_ep_read(ep, EP_IRQENB));
1250		size -= t;
1251		next += t;
1252
1253		t = scnprintf(next, size,
1254			"\tstat0 %02x stat1 %02x avail %04x "
1255			"(ep%d%s-%s)%s\n",
1256			net2272_ep_read(ep, EP_STAT0),
1257			net2272_ep_read(ep, EP_STAT1),
1258			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1259			t1 & 0x0f,
1260			ep->is_in ? "in" : "out",
1261			type_string(t1 >> 5),
1262			ep->stopped ? "*" : "");
1263		size -= t;
1264		next += t;
1265
1266		t = scnprintf(next, size,
1267			"\tep_transfer %06x\n",
1268			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1269			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1270			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1271		size -= t;
1272		next += t;
1273
1274		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1275		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1276		t = scnprintf(next, size,
1277			"\tbuf-a %s buf-b %s\n",
1278			buf_state_string(t1),
1279			buf_state_string(t2));
1280		size -= t;
1281		next += t;
1282	}
1283
1284	spin_unlock_irqrestore(&dev->lock, flags);
1285
1286	return PAGE_SIZE - size;
1287}
1288static DEVICE_ATTR_RO(registers);
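/*
 * DEVICE_ATTR_RO(registers) pairs registers_show() with a read-only sysfs
 * attribute; once the probe code registers it, the register dump above can
 * be read from the "registers" file in the controller's sysfs directory.
 */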
1289
1290/*---------------------------------------------------------------------------*/
1291
1292static void
1293net2272_set_fifo_mode(struct net2272 *dev, int mode)
1294{
1295	u8 tmp;
1296
1297	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1298	tmp |= (mode << 6);
1299	net2272_write(dev, LOCCTL, tmp);
1300
1301	INIT_LIST_HEAD(&dev->gadget.ep_list);
1302
1303	/* always ep-a, ep-c ... maybe not ep-b */
1304	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1305
1306	switch (mode) {
1307	case 0:
1308		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1309		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1310		break;
1311	case 1:
1312		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1313		dev->ep[1].fifo_size = 1024;
1314		dev->ep[2].fifo_size = 512;
1315		break;
1316	case 2:
1317		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1318		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1319		break;
1320	case 3:
1321		dev->ep[1].fifo_size = 1024;
1322		break;
1323	}
1324
 1325	/* ep-c always has two 512-byte buffers */
1326	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1327	dev->ep[3].fifo_size = 512;
1328}
1329
1330/*---------------------------------------------------------------------------*/
1331
1332static void
1333net2272_usb_reset(struct net2272 *dev)
1334{
1335	dev->gadget.speed = USB_SPEED_UNKNOWN;
1336
1337	net2272_cancel_dma(dev);
1338
1339	net2272_write(dev, IRQENB0, 0);
1340	net2272_write(dev, IRQENB1, 0);
1341
1342	/* clear irq state */
1343	net2272_write(dev, IRQSTAT0, 0xff);
1344	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1345
1346	net2272_write(dev, DMAREQ,
1347		(0 << DMA_BUFFER_VALID) |
1348		(0 << DMA_REQUEST_ENABLE) |
1349		(1 << DMA_CONTROL_DACK) |
1350		(dev->dma_eot_polarity << EOT_POLARITY) |
1351		(dev->dma_dack_polarity << DACK_POLARITY) |
1352		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1353		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1354
1355	net2272_cancel_dma(dev);
1356	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1357
 1358	/* Set the NET2272 ep fifo data width to 16-bit mode.  For correct byte swapping,
 1359	 * note that higher-level gadget drivers are expected to convert data to little endian.
 1360	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
1361	 */
1362	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1363	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1364}
1365
1366static void
1367net2272_usb_reinit(struct net2272 *dev)
1368{
1369	int i;
1370
1371	/* basic endpoint init */
1372	for (i = 0; i < 4; ++i) {
1373		struct net2272_ep *ep = &dev->ep[i];
1374
1375		ep->ep.name = ep_name[i];
1376		ep->dev = dev;
1377		ep->num = i;
1378		ep->not_empty = 0;
1379
1380		if (use_dma && ep->num == dma_ep)
1381			ep->dma = 1;
1382
1383		if (i > 0 && i <= 3)
1384			ep->fifo_size = 512;
1385		else
1386			ep->fifo_size = 64;
1387		net2272_ep_reset(ep);
1388
1389		if (i == 0) {
1390			ep->ep.caps.type_control = true;
1391		} else {
1392			ep->ep.caps.type_iso = true;
1393			ep->ep.caps.type_bulk = true;
1394			ep->ep.caps.type_int = true;
1395		}
1396
1397		ep->ep.caps.dir_in = true;
1398		ep->ep.caps.dir_out = true;
1399	}
1400	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1401
1402	dev->gadget.ep0 = &dev->ep[0].ep;
1403	dev->ep[0].stopped = 0;
1404	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1405}
1406
1407static void
1408net2272_ep0_start(struct net2272 *dev)
1409{
1410	struct net2272_ep *ep0 = &dev->ep[0];
1411
1412	net2272_ep_write(ep0, EP_RSPSET,
1413		(1 << NAK_OUT_PACKETS_MODE) |
1414		(1 << ALT_NAK_OUT_PACKETS));
1415	net2272_ep_write(ep0, EP_RSPCLR,
1416		(1 << HIDE_STATUS_PHASE) |
1417		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1418	net2272_write(dev, USBCTL0,
1419		(dev->softconnect << USB_DETECT_ENABLE) |
1420		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1421		(1 << IO_WAKEUP_ENABLE));
1422	net2272_write(dev, IRQENB0,
1423		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1424		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1425		(1 << DMA_DONE_INTERRUPT_ENABLE));
1426	net2272_write(dev, IRQENB1,
1427		(1 << VBUS_INTERRUPT_ENABLE) |
1428		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1429		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1430}
1431
1432/* when a driver is successfully registered, it will receive
1433 * control requests including set_configuration(), which enables
1434 * non-control requests.  then usb traffic follows until a
1435 * disconnect is reported.  then a host may connect again, or
1436 * the driver might get unbound.
1437 */
1438static int net2272_start(struct usb_gadget *_gadget,
1439		struct usb_gadget_driver *driver)
1440{
1441	struct net2272 *dev;
1442	unsigned i;
1443
1444	if (!driver || !driver->setup ||
1445	    driver->max_speed != USB_SPEED_HIGH)
1446		return -EINVAL;
1447
1448	dev = container_of(_gadget, struct net2272, gadget);
1449
1450	for (i = 0; i < 4; ++i)
1451		dev->ep[i].irqs = 0;
1452	/* hook up the driver ... */
1453	dev->softconnect = 1;
1454	dev->driver = driver;
1455
1456	/* ... then enable host detection and ep0; and we're ready
1457	 * for set_configuration as well as eventual disconnect.
1458	 */
1459	net2272_ep0_start(dev);
1460
1461	return 0;
1462}
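/*
 * Illustrative only: the sort of usb_gadget_driver a function driver
 * registers before the UDC core will call net2272_start().  All names are
 * hypothetical; note that max_speed must be USB_SPEED_HIGH and .setup
 * non-NULL or the checks above reject the driver.
 *
 *	static struct usb_gadget_driver my_driver = {
 *		.function   = "my_function",
 *		.max_speed  = USB_SPEED_HIGH,
 *		.bind       = my_bind,
 *		.unbind     = my_unbind,
 *		.setup      = my_setup,
 *		.disconnect = my_disconnect,
 *		.driver     = { .name = "my_function" },
 *	};
 *	...
 *	usb_gadget_register_driver(&my_driver);
 */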
1463
1464static void
1465stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1466{
1467	int i;
1468
1469	/* don't disconnect if it's not connected */
1470	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1471		driver = NULL;
1472
1473	/* stop hardware; prevent new request submissions;
1474	 * and kill any outstanding requests.
1475	 */
1476	net2272_usb_reset(dev);
1477	for (i = 0; i < 4; ++i)
1478		net2272_dequeue_all(&dev->ep[i]);
1479
1480	/* report disconnect; the driver is already quiesced */
1481	if (dev->async_callbacks && driver) {
1482		spin_unlock(&dev->lock);
1483		driver->disconnect(&dev->gadget);
1484		spin_lock(&dev->lock);
1485	}
1486
1487	net2272_usb_reinit(dev);
1488}
1489
1490static int net2272_stop(struct usb_gadget *_gadget)
1491{
1492	struct net2272 *dev;
1493	unsigned long flags;
1494
1495	dev = container_of(_gadget, struct net2272, gadget);
1496
1497	spin_lock_irqsave(&dev->lock, flags);
1498	stop_activity(dev, NULL);
1499	spin_unlock_irqrestore(&dev->lock, flags);
1500
1501	dev->driver = NULL;
1502
1503	return 0;
1504}
1505
1506static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
1507{
1508	struct net2272	*dev = container_of(_gadget, struct net2272, gadget);
1509
1510	spin_lock_irq(&dev->lock);
1511	dev->async_callbacks = enable;
1512	spin_unlock_irq(&dev->lock);
1513}
1514
1515/*---------------------------------------------------------------------------*/
1516/* handle ep-a/ep-b dma completions */
1517static void
1518net2272_handle_dma(struct net2272_ep *ep)
1519{
1520	struct net2272_request *req;
1521	unsigned len;
1522	int status;
1523
1524	if (!list_empty(&ep->queue))
1525		req = list_entry(ep->queue.next,
1526				struct net2272_request, queue);
1527	else
1528		req = NULL;
1529
1530	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1531
1532	/* Ensure DREQ is de-asserted */
1533	net2272_write(ep->dev, DMAREQ,
1534		(0 << DMA_BUFFER_VALID)
1535	      | (0 << DMA_REQUEST_ENABLE)
1536	      | (1 << DMA_CONTROL_DACK)
1537	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1538	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1539	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1540	      | (ep->dma << DMA_ENDPOINT_SELECT));
1541
1542	ep->dev->dma_busy = 0;
1543
1544	net2272_ep_write(ep, EP_IRQENB,
1545		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1546		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1547		| net2272_ep_read(ep, EP_IRQENB));
1548
1549	/* device-to-host transfer completed */
1550	if (ep->is_in) {
1551		/* validate a short packet or zlp if necessary */
1552		if ((req->req.length % ep->ep.maxpacket != 0) ||
1553				req->req.zero)
1554			set_fifo_bytecount(ep, 0);
1555
1556		net2272_done(ep, req, 0);
1557		if (!list_empty(&ep->queue)) {
1558			req = list_entry(ep->queue.next,
1559					struct net2272_request, queue);
1560			status = net2272_kick_dma(ep, req);
1561			if (status < 0)
1562				net2272_pio_advance(ep);
1563		}
1564
1565	/* host-to-device transfer completed */
1566	} else {
1567		/* terminated with a short packet? */
1568		if (net2272_read(ep->dev, IRQSTAT0) &
1569				(1 << DMA_DONE_INTERRUPT)) {
1570			/* abort system dma */
1571			net2272_cancel_dma(ep->dev);
1572		}
1573
1574		/* EP_TRANSFER will contain the number of bytes
1575		 * actually received.
1576		 * NOTE: There is no overflow detection on EP_TRANSFER:
1577		 * We can't deal with transfers larger than 2^24 bytes!
1578		 */
1579		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1580			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1581			| (net2272_ep_read(ep, EP_TRANSFER0));
1582
1583		if (ep->not_empty)
1584			len += 4;
1585
1586		req->req.actual += len;
1587
1588		/* get any remaining data */
1589		net2272_pio_advance(ep);
1590	}
1591}
1592
1593/*---------------------------------------------------------------------------*/
1594
1595static void
1596net2272_handle_ep(struct net2272_ep *ep)
1597{
1598	struct net2272_request *req;
1599	u8 stat0, stat1;
1600
1601	if (!list_empty(&ep->queue))
1602		req = list_entry(ep->queue.next,
1603			struct net2272_request, queue);
1604	else
1605		req = NULL;
1606
1607	/* ack all, and handle what we care about */
1608	stat0 = net2272_ep_read(ep, EP_STAT0);
1609	stat1 = net2272_ep_read(ep, EP_STAT1);
1610	ep->irqs++;
1611
1612	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1613		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1614
1615	net2272_ep_write(ep, EP_STAT0, stat0 &
1616		~((1 << NAK_OUT_PACKETS)
1617		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1618	net2272_ep_write(ep, EP_STAT1, stat1);
1619
1620	/* data packet(s) received (in the fifo, OUT)
1621	 * direction must be validated, otherwise control read status phase
1622	 * could be interpreted as a valid packet
1623	 */
1624	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1625		net2272_pio_advance(ep);
1626	/* data packet(s) transmitted (IN) */
1627	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1628		net2272_pio_advance(ep);
1629}
1630
1631static struct net2272_ep *
1632net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1633{
1634	struct net2272_ep *ep;
1635
1636	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1637		return &dev->ep[0];
1638
1639	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1640		u8 bEndpointAddress;
1641
1642		if (!ep->desc)
1643			continue;
1644		bEndpointAddress = ep->desc->bEndpointAddress;
1645		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1646			continue;
1647		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1648			return ep;
1649	}
1650	return NULL;
1651}
1652
1653/*
1654 * USB Test Packet:
1655 * JKJKJKJK * 9
1656 * JJKKJJKK * 8
1657 * JJJJKKKK * 8
1658 * JJJJJJJKKKKKKK * 8
1659 * JJJJJJJK * 8
1660 * {JKKKKKKK * 10}, JK
1661 */
1662static const u8 net2272_test_packet[] = {
1663	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1664	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1665	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1666	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1667	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1668	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1669};
1670
1671static void
1672net2272_set_test_mode(struct net2272 *dev, int mode)
1673{
1674	int i;
1675
1676	/* Disable all net2272 interrupts:
1677	 * Nothing but a power cycle should stop the test.
1678	 */
1679	net2272_write(dev, IRQENB0, 0x00);
1680	net2272_write(dev, IRQENB1, 0x00);
1681
 1682	/* Force transceiver to high-speed */
1683	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1684
1685	net2272_write(dev, PAGESEL, 0);
1686	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1687	net2272_write(dev, EP_RSPCLR,
1688			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1689			| (1 << HIDE_STATUS_PHASE));
1690	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1691	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1692
1693	/* wait for status phase to complete */
1694	while (!(net2272_read(dev, EP_STAT0) &
1695				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1696		;
1697
1698	/* Enable test mode */
1699	net2272_write(dev, USBTEST, mode);
1700
1701	/* load test packet */
1702	if (mode == USB_TEST_PACKET) {
1703		/* switch to 8 bit mode */
1704		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1705				~(1 << DATA_WIDTH));
1706
1707		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1708			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1709
1710		/* Validate test packet */
1711		net2272_write(dev, EP_TRANSFER0, 0);
1712	}
1713}
1714
1715static void
1716net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1717{
1718	struct net2272_ep *ep;
1719	u8 num, scratch;
1720
1721	/* starting a control request? */
1722	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1723		union {
1724			u8 raw[8];
1725			struct usb_ctrlrequest	r;
1726		} u;
1727		int tmp = 0;
1728		struct net2272_request *req;
1729
1730		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1731			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1732				dev->gadget.speed = USB_SPEED_HIGH;
1733			else
1734				dev->gadget.speed = USB_SPEED_FULL;
1735			dev_dbg(dev->dev, "%s\n",
1736				usb_speed_string(dev->gadget.speed));
1737		}
1738
1739		ep = &dev->ep[0];
1740		ep->irqs++;
1741
1742		/* make sure any leftover interrupt state is cleared */
1743		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1744		while (!list_empty(&ep->queue)) {
1745			req = list_entry(ep->queue.next,
1746				struct net2272_request, queue);
1747			net2272_done(ep, req,
1748				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1749		}
1750		ep->stopped = 0;
1751		dev->protocol_stall = 0;
1752		net2272_ep_write(ep, EP_STAT0,
1753			    (1 << DATA_IN_TOKEN_INTERRUPT)
1754			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1755			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1756			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1757			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1758		net2272_ep_write(ep, EP_STAT1,
1759			    (1 << TIMEOUT)
1760			  | (1 << USB_OUT_ACK_SENT)
1761			  | (1 << USB_OUT_NAK_SENT)
1762			  | (1 << USB_IN_ACK_RCVD)
1763			  | (1 << USB_IN_NAK_SENT)
1764			  | (1 << USB_STALL_SENT)
1765			  | (1 << LOCAL_OUT_ZLP));
1766
1767		/*
1768		 * Ensure Control Read pre-validation setting is beyond maximum size
1769		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1770		 *    an EP0 transfer following the Control Write is a Control Read,
1771		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1772		 *    pre-validation count.
1773		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
 1774		 *    the pre-validation count cannot cause an unexpected validation
1775		 */
1776		net2272_write(dev, PAGESEL, 0);
1777		net2272_write(dev, EP_TRANSFER2, 0xff);
1778		net2272_write(dev, EP_TRANSFER1, 0xff);
1779		net2272_write(dev, EP_TRANSFER0, 0xff);
1780
1781		u.raw[0] = net2272_read(dev, SETUP0);
1782		u.raw[1] = net2272_read(dev, SETUP1);
1783		u.raw[2] = net2272_read(dev, SETUP2);
1784		u.raw[3] = net2272_read(dev, SETUP3);
1785		u.raw[4] = net2272_read(dev, SETUP4);
1786		u.raw[5] = net2272_read(dev, SETUP5);
1787		u.raw[6] = net2272_read(dev, SETUP6);
1788		u.raw[7] = net2272_read(dev, SETUP7);
1789		/*
1790		 * If you have a big endian cpu make sure le16_to_cpus
1791		 * performs the proper byte swapping here...
1792		 */
1793		le16_to_cpus(&u.r.wValue);
1794		le16_to_cpus(&u.r.wIndex);
1795		le16_to_cpus(&u.r.wLength);
1796
1797		/* ack the irq */
1798		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1799		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1800
1801		/* watch control traffic at the token level, and force
1802		 * synchronization before letting the status phase happen.
1803		 */
1804		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1805		if (ep->is_in) {
1806			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1807				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1808				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1809			stop_out_naking(ep);
1810		} else
1811			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1812				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814		net2272_ep_write(ep, EP_IRQENB, scratch);
1815
1816		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1817			goto delegate;
1818		switch (u.r.bRequest) {
1819		case USB_REQ_GET_STATUS: {
1820			struct net2272_ep *e;
1821			u16 status = 0;
1822
1823			switch (u.r.bRequestType & USB_RECIP_MASK) {
1824			case USB_RECIP_ENDPOINT:
1825				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1826				if (!e || u.r.wLength > 2)
1827					goto do_stall;
1828				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1829					status = cpu_to_le16(1);
1830				else
1831					status = cpu_to_le16(0);
1832
1833				/* don't bother with a request object! */
1834				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1835				writew(status, net2272_reg_addr(dev, EP_DATA));
1836				set_fifo_bytecount(&dev->ep[0], 0);
1837				allow_status(ep);
1838				dev_vdbg(dev->dev, "%s stat %02x\n",
1839					ep->ep.name, status);
1840				goto next_endpoints;
1841			case USB_RECIP_DEVICE:
1842				if (u.r.wLength > 2)
1843					goto do_stall;
1844				if (dev->gadget.is_selfpowered)
1845					status = (1 << USB_DEVICE_SELF_POWERED);
1846
1847				/* don't bother with a request object! */
1848				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849				writew(status, net2272_reg_addr(dev, EP_DATA));
1850				set_fifo_bytecount(&dev->ep[0], 0);
1851				allow_status(ep);
1852				dev_vdbg(dev->dev, "device stat %02x\n", status);
1853				goto next_endpoints;
1854			case USB_RECIP_INTERFACE:
1855				if (u.r.wLength > 2)
1856					goto do_stall;
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "interface status %02x\n", status);
1864				goto next_endpoints;
1865			}
1866
1867			break;
1868		}
1869		case USB_REQ_CLEAR_FEATURE: {
1870			struct net2272_ep *e;
1871
1872			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1873				goto delegate;
1874			if (u.r.wValue != USB_ENDPOINT_HALT ||
1875			    u.r.wLength != 0)
1876				goto do_stall;
1877			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1878			if (!e)
1879				goto do_stall;
1880			if (e->wedged) {
1881				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1882					ep->ep.name);
1883			} else {
1884				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1885				clear_halt(e);
1886			}
1887			allow_status(ep);
1888			goto next_endpoints;
1889		}
1890		case USB_REQ_SET_FEATURE: {
1891			struct net2272_ep *e;
1892
1893			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1894				if (u.r.wIndex != NORMAL_OPERATION)
1895					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1896				allow_status(ep);
1897				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1898				goto next_endpoints;
1899			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1900				goto delegate;
1901			if (u.r.wValue != USB_ENDPOINT_HALT ||
1902			    u.r.wLength != 0)
1903				goto do_stall;
1904			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1905			if (!e)
1906				goto do_stall;
1907			set_halt(e);
1908			allow_status(ep);
1909			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1910			goto next_endpoints;
1911		}
1912		case USB_REQ_SET_ADDRESS: {
1913			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1914			allow_status(ep);
1915			break;
1916		}
1917		default:
1918 delegate:
1919			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1920				"ep_cfg %08x\n",
1921				u.r.bRequestType, u.r.bRequest,
1922				u.r.wValue, u.r.wIndex,
1923				net2272_ep_read(ep, EP_CFG));
1924			if (dev->async_callbacks) {
1925				spin_unlock(&dev->lock);
1926				tmp = dev->driver->setup(&dev->gadget, &u.r);
1927				spin_lock(&dev->lock);
1928			}
1929		}
1930
1931		/* stall ep0 on error */
1932		if (tmp < 0) {
1933 do_stall:
1934			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1935				u.r.bRequestType, u.r.bRequest, tmp);
1936			dev->protocol_stall = 1;
1937		}
1938	/* endpoint dma irq? */
1939	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1940		net2272_cancel_dma(dev);
1941		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1942		stat &= ~(1 << DMA_DONE_INTERRUPT);
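     		/* DMA_ENDPOINT_SELECT in DMAREQ: set selects ep-b (dev->ep[2]), clear selects ep-a (dev->ep[1]) */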
1943		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1944			? 2 : 1;
1945
1946		ep = &dev->ep[num];
1947		net2272_handle_dma(ep);
1948	}
1949
1950 next_endpoints:
1951	/* endpoint data irq? */
1952	scratch = stat & 0x0f;
1953	stat &= ~0x0f;
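     	/* bits 0-3 of IRQSTAT0 flag data interrupts for ep0 and ep-a/b/c; service each flagged endpoint */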
1954	for (num = 0; scratch; num++) {
1955		u8 t;
1956
1957		/* does this endpoint's FIFO and queue need tending? */
1958		t = 1 << num;
1959		if ((scratch & t) == 0)
1960			continue;
1961		scratch ^= t;
1962
1963		ep = &dev->ep[num];
1964		net2272_handle_ep(ep);
1965	}
1966
1967	/* some interrupts we can just ignore */
1968	stat &= ~(1 << SOF_INTERRUPT);
1969
1970	if (stat)
1971		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1972}
1973
1974static void
1975net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1976{
1977	u8 tmp, mask;
1978
1979	/* after disconnect there's nothing else to do! */
1980	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1981	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
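     	/* 'mask' covers the USBCTL1 speed-indication bits; a root-port reset seen with neither bit set is handled as a real bus reset below */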
1982
1983	if (stat & tmp) {
1984		bool	reset = false;
1985		bool	disconnect = false;
1986
1987		/*
1988		 * Ignore disconnects and resets if the speed hasn't been set.
1989		 * VBUS can bounce and there's always an initial reset.
1990		 */
1991		net2272_write(dev, IRQSTAT1, tmp);
1992		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1993			if ((stat & (1 << VBUS_INTERRUPT)) &&
1994					(net2272_read(dev, USBCTL1) &
1995						(1 << VBUS_PIN)) == 0) {
1996				disconnect = true;
1997				dev_dbg(dev->dev, "disconnect %s\n",
1998					dev->driver->driver.name);
1999			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2000					(net2272_read(dev, USBCTL1) & mask)
2001						== 0) {
2002				reset = true;
2003				dev_dbg(dev->dev, "reset %s\n",
2004					dev->driver->driver.name);
2005			}
2006
2007			if (disconnect || reset) {
2008				stop_activity(dev, dev->driver);
2009				net2272_ep0_start(dev);
2010				if (dev->async_callbacks) {
2011					spin_unlock(&dev->lock);
2012					if (reset)
2013						usb_gadget_udc_reset(&dev->gadget, dev->driver);
2014					else
2015						(dev->driver->disconnect)(&dev->gadget);
2016					spin_lock(&dev->lock);
2017				}
2018				return;
2019			}
2020		}
2021		stat &= ~tmp;
2022
2023		if (!stat)
2024			return;
2025	}
2026
2027	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
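     	/* the change interrupt fires on both suspend entry and exit; SUSPEND_REQUEST_INTERRUPT distinguishes suspend from resume */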
2028	if (stat & tmp) {
2029		net2272_write(dev, IRQSTAT1, tmp);
2030		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2031			if (dev->async_callbacks && dev->driver->suspend)
2032				dev->driver->suspend(&dev->gadget);
2033			if (!enable_suspend) {
2034				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2035				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2036			}
2037		} else {
2038			if (dev->async_callbacks && dev->driver->resume)
2039				dev->driver->resume(&dev->gadget);
2040		}
2041		stat &= ~tmp;
2042	}
2043
2044	/* clear any other status/irqs */
2045	if (stat)
2046		net2272_write(dev, IRQSTAT1, stat);
2047
2048	/* some status we can just ignore */
2049	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2050			| (1 << SUSPEND_REQUEST_INTERRUPT)
2051			| (1 << RESUME_INTERRUPT));
2052	if (!stat)
2053		return;
2054	else
2055		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2056}
2057
2058static irqreturn_t net2272_irq(int irq, void *_dev)
2059{
2060	struct net2272 *dev = _dev;
2061#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2062	u32 intcsr;
2063#endif
2064#if defined(PLX_PCI_RDK)
2065	u8 dmareq;
2066#endif
2067	spin_lock(&dev->lock);
2068#if defined(PLX_PCI_RDK)
2069	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2070
2071	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2072		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2073				dev->rdk1.plx9054_base_addr + INTCSR);
2074		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2075		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2076		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2077		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2078			dev->rdk1.plx9054_base_addr + INTCSR);
2079	}
2080	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2081		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2082				dev->rdk1.plx9054_base_addr + DMACSR0);
2083
2084		dmareq = net2272_read(dev, DMAREQ);
2085		if (dmareq & 0x01)
2086			net2272_handle_dma(&dev->ep[2]);
2087		else
2088			net2272_handle_dma(&dev->ep[1]);
2089	}
2090#endif
2091#if defined(PLX_PCI_RDK2)
2092	/* see if PCI int for us by checking irqstat */
2093	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2094	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2095		spin_unlock(&dev->lock);
2096		return IRQ_NONE;
2097	}
2098	/* check dma interrupts */
2099#endif
 2100	/* Platform/device interrupt handler */
2101#if !defined(PLX_PCI_RDK)
2102	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2103	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2104#endif
2105	spin_unlock(&dev->lock);
2106
2107	return IRQ_HANDLED;
2108}
2109
2110static int net2272_present(struct net2272 *dev)
2111{
2112	/*
2113	 * Quick test to see if CPU can communicate properly with the NET2272.
2114	 * Verifies connection using writes and reads to write/read and
2115	 * read-only registers.
2116	 *
 2117	 * This routine is strongly recommended, especially during early bring-up
 2118	 * of new hardware; however, for designs that do not apply Power On System
 2119	 * Tests (POST) it may be discarded (or perhaps minimized).
2120	 */
2121	unsigned int ii;
2122	u8 val, refval;
2123
 2124	/* Verify the NET2272 SCRATCH register can be written and read back */
2125	refval = net2272_read(dev, SCRATCH);
2126	for (ii = 0; ii < 0x100; ii += 7) {
2127		net2272_write(dev, SCRATCH, ii);
2128		val = net2272_read(dev, SCRATCH);
2129		if (val != ii) {
2130			dev_dbg(dev->dev,
2131				"%s: write/read SCRATCH register test failed: "
2132				"wrote:0x%2.2x, read:0x%2.2x\n",
2133				__func__, ii, val);
2134			return -EINVAL;
2135		}
2136	}
2137	/* To be nice, we write the original SCRATCH value back: */
2138	net2272_write(dev, SCRATCH, refval);
2139
2140	/* Verify NET2272 CHIPREV register is read-only: */
2141	refval = net2272_read(dev, CHIPREV_2272);
2142	for (ii = 0; ii < 0x100; ii += 7) {
2143		net2272_write(dev, CHIPREV_2272, ii);
2144		val = net2272_read(dev, CHIPREV_2272);
2145		if (val != refval) {
2146			dev_dbg(dev->dev,
2147				"%s: write/read CHIPREV register test failed: "
2148				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2149				__func__, ii, val, refval);
2150			return -EINVAL;
2151		}
2152	}
2153
2154	/*
2155	 * Verify NET2272's "NET2270 legacy revision" register
2156	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2157	 *    register should read the same value, regardless of the NET2272
2158	 *    silicon revision.  The legacy register applies to NET2270
 2159	 *    silicon revision.  The legacy register exists so that firmware
 2160	 *    written for the NET2270 can also be used on the NET2272.
2161	val = net2272_read(dev, CHIPREV_LEGACY);
2162	if (val != NET2270_LEGACY_REV) {
2163		/*
2164		 * Unexpected legacy revision value
2165		 * - Perhaps the chip is a NET2270?
2166		 */
2167		dev_dbg(dev->dev,
2168			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2169			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2170			__func__, NET2270_LEGACY_REV, val);
2171		return -EINVAL;
2172	}
2173
2174	/*
2175	 * Verify NET2272 silicon revision
2176	 *  - This revision register is appropriate for the silicon version
2177	 *    of the NET2272
2178	 */
2179	val = net2272_read(dev, CHIPREV_2272);
2180	switch (val) {
2181	case CHIPREV_NET2272_R1:
2182		/*
2183		 * NET2272 Rev 1 has DMA related errata:
2184		 *  - Newer silicon (Rev 1A or better) required
2185		 */
2186		dev_dbg(dev->dev,
2187			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2188			__func__);
2189		break;
2190	case CHIPREV_NET2272_R1A:
2191		break;
2192	default:
2193		/* NET2272 silicon version *may* not work with this firmware */
2194		dev_dbg(dev->dev,
2195			"%s: unexpected silicon revision register value: "
2196			" CHIPREV_2272: 0x%2.2x\n",
2197			__func__, val);
2198		/*
2199		 * Return Success, even though the chip rev is not an expected value
2200		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2201		 *  - Often, new silicon is perfectly compatible
2202		 */
2203	}
2204
2205	/* Success: NET2272 checks out OK */
2206	return 0;
2207}
2208
2209static void
2210net2272_gadget_release(struct device *_dev)
2211{
2212	struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
2213
2214	kfree(dev);
2215}
2216
2217/*---------------------------------------------------------------------------*/
2218
2219static void
2220net2272_remove(struct net2272 *dev)
2221{
2222	if (dev->added)
2223		usb_del_gadget(&dev->gadget);
2224	free_irq(dev->irq, dev);
2225	iounmap(dev->base_addr);
2226	device_remove_file(dev->dev, &dev_attr_registers);
2227
2228	dev_info(dev->dev, "unbind\n");
2229}
2230
2231static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2232{
2233	struct net2272 *ret;
2234
2235	if (!irq) {
2236		dev_dbg(dev, "No IRQ!\n");
2237		return ERR_PTR(-ENODEV);
2238	}
2239
2240	/* alloc, and start init */
2241	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2242	if (!ret)
2243		return ERR_PTR(-ENOMEM);
2244
2245	spin_lock_init(&ret->lock);
2246	ret->irq = irq;
2247	ret->dev = dev;
2248	ret->gadget.ops = &net2272_ops;
2249	ret->gadget.max_speed = USB_SPEED_HIGH;
2250
2251	/* the "gadget" abstracts/virtualizes the controller */
2252	ret->gadget.name = driver_name;
2253	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
2254
2255	return ret;
2256}
2257
2258static int
2259net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2260{
2261	int ret;
2262
 2263	/* See if the chip is actually there */
2264	if (net2272_present(dev)) {
2265		dev_warn(dev->dev, "2272 not found!\n");
2266		ret = -ENODEV;
2267		goto err;
2268	}
2269
2270	net2272_usb_reset(dev);
2271	net2272_usb_reinit(dev);
2272
2273	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2274	if (ret) {
2275		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2276		goto err;
2277	}
2278
2279	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2280
2281	/* done */
2282	dev_info(dev->dev, "%s\n", driver_desc);
2283	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2284		dev->irq, dev->base_addr, dev->chiprev,
2285		dma_mode_string());
2286	dev_info(dev->dev, "version: %s\n", driver_vers);
2287
2288	ret = device_create_file(dev->dev, &dev_attr_registers);
2289	if (ret)
2290		goto err_irq;
2291
2292	ret = usb_add_gadget(&dev->gadget);
2293	if (ret)
2294		goto err_add_udc;
2295	dev->added = 1;
2296
2297	return 0;
2298
2299err_add_udc:
2300	device_remove_file(dev->dev, &dev_attr_registers);
2301 err_irq:
2302	free_irq(dev->irq, dev);
2303 err:
2304	return ret;
2305}
2306
2307#ifdef CONFIG_USB_PCI
2308
2309/*
2310 * wrap this driver around the specified device, but
2311 * don't respond over USB until a gadget driver binds to us
2312 */
2313
2314static int
2315net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2316{
2317	unsigned long resource, len, tmp;
2318	void __iomem *mem_mapped_addr[4];
2319	int ret, i;
2320
2321	/*
2322	 * BAR 0 holds PLX 9054 config registers
2323	 * BAR 1 is i/o memory; unused here
2324	 * BAR 2 holds EPLD config registers
2325	 * BAR 3 holds NET2272 registers
2326	 */
2327
2328	/* Find and map all address spaces */
2329	for (i = 0; i < 4; ++i) {
2330		if (i == 1)
2331			continue;	/* BAR1 unused */
2332
2333		resource = pci_resource_start(pdev, i);
2334		len = pci_resource_len(pdev, i);
2335
2336		if (!request_mem_region(resource, len, driver_name)) {
2337			dev_dbg(dev->dev, "controller already in use\n");
2338			ret = -EBUSY;
2339			goto err;
2340		}
2341
2342		mem_mapped_addr[i] = ioremap(resource, len);
2343		if (mem_mapped_addr[i] == NULL) {
2344			release_mem_region(resource, len);
2345			dev_dbg(dev->dev, "can't map memory\n");
2346			ret = -EFAULT;
2347			goto err;
2348		}
2349	}
2350
2351	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2352	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2353	dev->base_addr = mem_mapped_addr[3];
2354
2355	/* Set PLX 9054 bus width (16 bits) */
2356	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2357	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2358			dev->rdk1.plx9054_base_addr + LBRD1);
2359
2360	/* Enable PLX 9054 Interrupts */
2361	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2362			(1 << PCI_INTERRUPT_ENABLE) |
2363			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2364			dev->rdk1.plx9054_base_addr + INTCSR);
2365
2366	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2367			dev->rdk1.plx9054_base_addr + DMACSR0);
2368
 2369	/* configure the EPLD and assert NET2272 reset */
2370	writeb((1 << EPLD_DMA_ENABLE) |
2371		(1 << DMA_CTL_DACK) |
2372		(1 << DMA_TIMEOUT_ENABLE) |
2373		(1 << USER) |
2374		(0 << MPX_MODE) |
2375		(1 << BUSWIDTH) |
2376		(1 << NET2272_RESET),
2377		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2378
2379	mb();
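     	/* deassert NET2272 reset and give the chip time to settle */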
2380	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2381		~(1 << NET2272_RESET),
2382		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2383	udelay(200);
2384
2385	return 0;
2386
2387 err:
2388	while (--i >= 0) {
2389		if (i == 1)
2390			continue;	/* BAR1 unused */
2391		iounmap(mem_mapped_addr[i]);
2392		release_mem_region(pci_resource_start(pdev, i),
2393			pci_resource_len(pdev, i));
2394	}
2395
2396	return ret;
2397}
2398
2399static int
2400net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2401{
2402	unsigned long resource, len;
2403	void __iomem *mem_mapped_addr[2];
2404	int ret, i;
2405
2406	/*
 2407	 * BAR 0 holds FPGA config registers
2408	 * BAR 1 holds NET2272 registers
2409	 */
2410
 2411	/* Find and map all address spaces; BAR2-3 unused on RDK2 */
2412	for (i = 0; i < 2; ++i) {
2413		resource = pci_resource_start(pdev, i);
2414		len = pci_resource_len(pdev, i);
2415
2416		if (!request_mem_region(resource, len, driver_name)) {
2417			dev_dbg(dev->dev, "controller already in use\n");
2418			ret = -EBUSY;
2419			goto err;
2420		}
2421
2422		mem_mapped_addr[i] = ioremap(resource, len);
2423		if (mem_mapped_addr[i] == NULL) {
2424			release_mem_region(resource, len);
2425			dev_dbg(dev->dev, "can't map memory\n");
2426			ret = -EFAULT;
2427			goto err;
2428		}
2429	}
2430
2431	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2432	dev->base_addr = mem_mapped_addr[1];
2433
2434	mb();
2435	/* Set 2272 bus width (16 bits) and reset */
2436	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2437	udelay(200);
2438	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
 2439	/* Print FPGA version number */
2440	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2441		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2442	/* Enable FPGA Interrupts */
2443	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2444
2445	return 0;
2446
2447 err:
2448	while (--i >= 0) {
2449		iounmap(mem_mapped_addr[i]);
2450		release_mem_region(pci_resource_start(pdev, i),
2451			pci_resource_len(pdev, i));
2452	}
2453
2454	return ret;
2455}
2456
2457static int
2458net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2459{
2460	struct net2272 *dev;
2461	int ret;
2462
2463	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2464	if (IS_ERR(dev))
2465		return PTR_ERR(dev);
2466	dev->dev_id = pdev->device;
2467
2468	if (pci_enable_device(pdev) < 0) {
2469		ret = -ENODEV;
2470		goto err_put;
2471	}
2472
2473	pci_set_master(pdev);
2474
2475	switch (pdev->device) {
2476	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2477	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2478	default: BUG();
2479	}
2480	if (ret)
2481		goto err_pci;
2482
2483	ret = net2272_probe_fin(dev, 0);
2484	if (ret)
2485		goto err_pci;
2486
2487	pci_set_drvdata(pdev, dev);
2488
2489	return 0;
2490
2491 err_pci:
2492	pci_disable_device(pdev);
2493 err_put:
2494	usb_put_gadget(&dev->gadget);
2495
2496	return ret;
2497}
2498
2499static void
2500net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2501{
2502	int i;
2503
2504	/* disable PLX 9054 interrupts */
2505	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2506		~(1 << PCI_INTERRUPT_ENABLE),
2507		dev->rdk1.plx9054_base_addr + INTCSR);
2508
2509	/* clean up resources allocated during probe() */
2510	iounmap(dev->rdk1.plx9054_base_addr);
2511	iounmap(dev->rdk1.epld_base_addr);
2512
2513	for (i = 0; i < 4; ++i) {
2514		if (i == 1)
2515			continue;	/* BAR1 unused */
2516		release_mem_region(pci_resource_start(pdev, i),
2517			pci_resource_len(pdev, i));
2518	}
2519}
2520
2521static void
2522net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2523{
2524	int i;
2525
2526	/* disable fpga interrupts
2527	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2528			~(1 << PCI_INTERRUPT_ENABLE),
2529			dev->rdk1.plx9054_base_addr + INTCSR);
2530	*/
2531
2532	/* clean up resources allocated during probe() */
2533	iounmap(dev->rdk2.fpga_base_addr);
2534
2535	for (i = 0; i < 2; ++i)
2536		release_mem_region(pci_resource_start(pdev, i),
2537			pci_resource_len(pdev, i));
2538}
2539
2540static void
2541net2272_pci_remove(struct pci_dev *pdev)
2542{
2543	struct net2272 *dev = pci_get_drvdata(pdev);
2544
2545	net2272_remove(dev);
2546
2547	switch (pdev->device) {
2548	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2549	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2550	default: BUG();
2551	}
2552
2553	pci_disable_device(pdev);
2554
2555	usb_put_gadget(&dev->gadget);
2556}
2557
2558/* Table of matching PCI IDs */
2559static struct pci_device_id pci_ids[] = {
2560	{	/* RDK 1 card */
2561		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562		.class_mask  = 0,
2563		.vendor      = PCI_VENDOR_ID_PLX,
2564		.device      = PCI_DEVICE_ID_RDK1,
2565		.subvendor   = PCI_ANY_ID,
2566		.subdevice   = PCI_ANY_ID,
2567	},
2568	{	/* RDK 2 card */
2569		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2570		.class_mask  = 0,
2571		.vendor      = PCI_VENDOR_ID_PLX,
2572		.device      = PCI_DEVICE_ID_RDK2,
2573		.subvendor   = PCI_ANY_ID,
2574		.subdevice   = PCI_ANY_ID,
2575	},
2576	{ }
2577};
2578MODULE_DEVICE_TABLE(pci, pci_ids);
2579
2580static struct pci_driver net2272_pci_driver = {
2581	.name     = driver_name,
2582	.id_table = pci_ids,
2583
2584	.probe    = net2272_pci_probe,
2585	.remove   = net2272_pci_remove,
2586};
2587
2588static int net2272_pci_register(void)
2589{
2590	return pci_register_driver(&net2272_pci_driver);
2591}
2592
2593static void net2272_pci_unregister(void)
2594{
2595	pci_unregister_driver(&net2272_pci_driver);
2596}
2597
2598#else
2599static inline int net2272_pci_register(void) { return 0; }
2600static inline void net2272_pci_unregister(void) { }
2601#endif
2602
2603/*---------------------------------------------------------------------------*/
2604
2605static int
2606net2272_plat_probe(struct platform_device *pdev)
2607{
2608	struct net2272 *dev;
2609	int ret;
2610	unsigned int irqflags;
2611	resource_size_t base, len;
2612	struct resource *iomem, *iomem_bus, *irq_res;
2613
2614	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2615	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2617	if (!irq_res || !iomem) {
 2618		dev_err(&pdev->dev, "must provide irq/base addr\n");
2619		return -EINVAL;
2620	}
2621
2622	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2623	if (IS_ERR(dev))
2624		return PTR_ERR(dev);
2625
2626	irqflags = 0;
2627	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2628		irqflags |= IRQF_TRIGGER_RISING;
2629	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2630		irqflags |= IRQF_TRIGGER_FALLING;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2632		irqflags |= IRQF_TRIGGER_HIGH;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2634		irqflags |= IRQF_TRIGGER_LOW;
2635
2636	base = iomem->start;
2637	len = resource_size(iomem);
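     	/* an optional bus resource supplies the register address shift used for this platform's bus wiring */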
2638	if (iomem_bus)
2639		dev->base_shift = iomem_bus->start;
2640
2641	if (!request_mem_region(base, len, driver_name)) {
 2642		dev_dbg(dev->dev, "can't request memory region!\n");
2643		ret = -EBUSY;
2644		goto err;
2645	}
2646	dev->base_addr = ioremap(base, len);
2647	if (!dev->base_addr) {
2648		dev_dbg(dev->dev, "can't map memory\n");
2649		ret = -EFAULT;
2650		goto err_req;
2651	}
2652
2653	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2654	if (ret)
2655		goto err_io;
2656
2657	platform_set_drvdata(pdev, dev);
2658	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2659		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2660
2661	return 0;
2662
2663 err_io:
2664	iounmap(dev->base_addr);
2665 err_req:
2666	release_mem_region(base, len);
2667 err:
2668	usb_put_gadget(&dev->gadget);
2669
2670	return ret;
2671}
2672
2673static int
2674net2272_plat_remove(struct platform_device *pdev)
2675{
2676	struct net2272 *dev = platform_get_drvdata(pdev);
2677
2678	net2272_remove(dev);
2679
2680	release_mem_region(pdev->resource[0].start,
2681		resource_size(&pdev->resource[0]));
2682
2683	usb_put_gadget(&dev->gadget);
2684
2685	return 0;
2686}
2687
2688static struct platform_driver net2272_plat_driver = {
2689	.probe   = net2272_plat_probe,
2690	.remove  = net2272_plat_remove,
2691	.driver  = {
2692		.name  = driver_name,
2693	},
2694	/* FIXME .suspend, .resume */
2695};
2696MODULE_ALIAS("platform:net2272");
2697
2698static int __init net2272_init(void)
2699{
2700	int ret;
2701
2702	ret = net2272_pci_register();
2703	if (ret)
2704		return ret;
2705	ret = platform_driver_register(&net2272_plat_driver);
2706	if (ret)
2707		goto err_pci;
2708	return ret;
2709
2710err_pci:
2711	net2272_pci_unregister();
2712	return ret;
2713}
2714module_init(net2272_init);
2715
2716static void __exit net2272_cleanup(void)
2717{
2718	net2272_pci_unregister();
2719	platform_driver_unregister(&net2272_plat_driver);
2720}
2721module_exit(net2272_cleanup);
2722
2723MODULE_DESCRIPTION(DRIVER_DESC);
2724MODULE_AUTHOR("PLX Technology, Inc.");
2725MODULE_LICENSE("GPL");
 107{
 108	u8 tmp;
 109
 110#ifndef DEBUG
 111	return;
 112#endif
 113
 114	tmp = net2272_ep_read(ep, EP_STAT0);
 115	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 116		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 117			ep->ep.name, where, tmp);
 118		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 119	}
 120}
 121#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 122
 123static void stop_out_naking(struct net2272_ep *ep)
 124{
 125	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 126
 127	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 128		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 129}
 130
 131#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 132
 133static char *type_string(u8 bmAttributes)
 134{
 135	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 136	case USB_ENDPOINT_XFER_BULK: return "bulk";
 137	case USB_ENDPOINT_XFER_ISOC: return "iso";
 138	case USB_ENDPOINT_XFER_INT:  return "intr";
 139	default:                     return "control";
 140	}
 141}
 142
 143static char *buf_state_string(unsigned state)
 144{
 145	switch (state) {
 146	case BUFF_FREE:  return "free";
 147	case BUFF_VALID: return "valid";
 148	case BUFF_LCL:   return "local";
 149	case BUFF_USB:   return "usb";
 150	default:         return "unknown";
 151	}
 152}
 153
 154static char *dma_mode_string(void)
 155{
 156	if (!use_dma)
 157		return "PIO";
 158	switch (dma_mode) {
 159	case 0:  return "SLOW DREQ";
 160	case 1:  return "FAST DREQ";
 161	case 2:  return "BURST";
 162	default: return "invalid";
 163	}
 164}
 165
 166static void net2272_dequeue_all(struct net2272_ep *);
 167static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 168static int net2272_fifo_status(struct usb_ep *);
 169
 170static const struct usb_ep_ops net2272_ep_ops;
 171
 172/*---------------------------------------------------------------------------*/
 173
 174static int
 175net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 176{
 177	struct net2272 *dev;
 178	struct net2272_ep *ep;
 179	u32 max;
 180	u8 tmp;
 181	unsigned long flags;
 182
 183	ep = container_of(_ep, struct net2272_ep, ep);
 184	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 185			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 186		return -EINVAL;
 187	dev = ep->dev;
 188	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 189		return -ESHUTDOWN;
 190
 191	max = usb_endpoint_maxp(desc);
 192
 193	spin_lock_irqsave(&dev->lock, flags);
 194	_ep->maxpacket = max;
 195	ep->desc = desc;
 196
 197	/* net2272_ep_reset() has already been called */
 198	ep->stopped = 0;
 199	ep->wedged = 0;
 200
 201	/* set speed-dependent max packet */
 202	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 203	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 204
 205	/* set type, direction, address; reset fifo counters */
 206	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 207	tmp = usb_endpoint_type(desc);
 208	if (usb_endpoint_xfer_bulk(desc)) {
 209		/* catch some particularly blatant driver bugs */
 210		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 211		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 212			spin_unlock_irqrestore(&dev->lock, flags);
 213			return -ERANGE;
 214		}
 215	}
 216	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 217	tmp <<= ENDPOINT_TYPE;
 218	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 219	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 220	tmp |= (1 << ENDPOINT_ENABLE);
 221
 222	/* for OUT transfers, block the rx fifo until a read is posted */
 223	ep->is_in = usb_endpoint_dir_in(desc);
 224	if (!ep->is_in)
 225		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 226
 227	net2272_ep_write(ep, EP_CFG, tmp);
 228
 229	/* enable irqs */
 230	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 231	net2272_write(dev, IRQENB0, tmp);
 232
 233	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 234		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 235		| net2272_ep_read(ep, EP_IRQENB);
 236	net2272_ep_write(ep, EP_IRQENB, tmp);
 237
 238	tmp = desc->bEndpointAddress;
 239	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 240		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 241		type_string(desc->bmAttributes), max,
 242		net2272_ep_read(ep, EP_CFG));
 243
 244	spin_unlock_irqrestore(&dev->lock, flags);
 245	return 0;
 246}
 247
 248static void net2272_ep_reset(struct net2272_ep *ep)
 249{
 250	u8 tmp;
 251
 252	ep->desc = NULL;
 253	INIT_LIST_HEAD(&ep->queue);
 254
 255	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 256	ep->ep.ops = &net2272_ep_ops;
 257
 258	/* disable irqs, endpoint */
 259	net2272_ep_write(ep, EP_IRQENB, 0);
 260
 261	/* init to our chosen defaults, notably so that we NAK OUT
 262	 * packets until the driver queues a read.
 263	 */
 264	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 265	net2272_ep_write(ep, EP_RSPSET, tmp);
 266
 267	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 268	if (ep->num != 0)
 269		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 270
 271	net2272_ep_write(ep, EP_RSPCLR, tmp);
 272
 273	/* scrub most status bits, and flush any fifo state */
 274	net2272_ep_write(ep, EP_STAT0,
 275			  (1 << DATA_IN_TOKEN_INTERRUPT)
 276			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 277			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 278			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 279			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 280
 281	net2272_ep_write(ep, EP_STAT1,
 282			    (1 << TIMEOUT)
 283			  | (1 << USB_OUT_ACK_SENT)
 284			  | (1 << USB_OUT_NAK_SENT)
 285			  | (1 << USB_IN_ACK_RCVD)
 286			  | (1 << USB_IN_NAK_SENT)
 287			  | (1 << USB_STALL_SENT)
 288			  | (1 << LOCAL_OUT_ZLP)
 289			  | (1 << BUFFER_FLUSH));
 290
 291	/* fifo size is handled separately */
 292}
 293
 294static int net2272_disable(struct usb_ep *_ep)
 295{
 296	struct net2272_ep *ep;
 297	unsigned long flags;
 298
 299	ep = container_of(_ep, struct net2272_ep, ep);
 300	if (!_ep || !ep->desc || _ep->name == ep0name)
 301		return -EINVAL;
 302
 303	spin_lock_irqsave(&ep->dev->lock, flags);
 304	net2272_dequeue_all(ep);
 305	net2272_ep_reset(ep);
 306
 307	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 308
 309	spin_unlock_irqrestore(&ep->dev->lock, flags);
 310	return 0;
 311}
 312
 313/*---------------------------------------------------------------------------*/
 314
 315static struct usb_request *
 316net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 317{
 318	struct net2272_request *req;
 319
 320	if (!_ep)
 321		return NULL;
 322
 323	req = kzalloc(sizeof(*req), gfp_flags);
 324	if (!req)
 325		return NULL;
 326
 327	INIT_LIST_HEAD(&req->queue);
 328
 329	return &req->req;
 330}
 331
 332static void
 333net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 334{
 335	struct net2272_request *req;
 336
 337	if (!_ep || !_req)
 338		return;
 339
 340	req = container_of(_req, struct net2272_request, req);
 341	WARN_ON(!list_empty(&req->queue));
 342	kfree(req);
 343}
 344
 345static void
 346net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 347{
 348	struct net2272 *dev;
 349	unsigned stopped = ep->stopped;
 350
 351	if (ep->num == 0) {
 352		if (ep->dev->protocol_stall) {
 353			ep->stopped = 1;
 354			set_halt(ep);
 355		}
 356		allow_status(ep);
 357	}
 358
 359	list_del_init(&req->queue);
 360
 361	if (req->req.status == -EINPROGRESS)
 362		req->req.status = status;
 363	else
 364		status = req->req.status;
 365
 366	dev = ep->dev;
 367	if (use_dma && ep->dma)
 368		usb_gadget_unmap_request(&dev->gadget, &req->req,
 369				ep->is_in);
 370
 371	if (status && status != -ESHUTDOWN)
 372		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 373			ep->ep.name, &req->req, status,
 374			req->req.actual, req->req.length, req->req.buf);
 375
 376	/* don't modify queue heads during completion callback */
 377	ep->stopped = 1;
 378	spin_unlock(&dev->lock);
 379	usb_gadget_giveback_request(&ep->ep, &req->req);
 380	spin_lock(&dev->lock);
 381	ep->stopped = stopped;
 382}
 383
 384static int
 385net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 386	struct net2272_request *req, unsigned max)
 387{
 388	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 389	u16 *bufp;
 390	unsigned length, count;
 391	u8 tmp;
 392
 393	length = min(req->req.length - req->req.actual, max);
 394	req->req.actual += length;
 395
 396	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 397		ep->ep.name, req, max, length,
 398		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 399
 400	count = length;
 401	bufp = (u16 *)buf;
 402
 403	while (likely(count >= 2)) {
 404		/* no byte-swap required; chip endian set during init */
 405		writew(*bufp++, ep_data);
 406		count -= 2;
 407	}
 408	buf = (u8 *)bufp;
 409
 410	/* write final byte by placing the NET2272 into 8-bit mode */
 411	if (unlikely(count)) {
 412		tmp = net2272_read(ep->dev, LOCCTL);
 413		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 414		writeb(*buf, ep_data);
 415		net2272_write(ep->dev, LOCCTL, tmp);
 416	}
 417	return length;
 418}
 419
 420/* returns: 0: still running, 1: completed, negative: errno */
 421static int
 422net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 423{
 424	u8 *buf;
 425	unsigned count, max;
 426	int status;
 427
 428	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 429		ep->ep.name, req->req.actual, req->req.length);
 430
 431	/*
 432	 * Keep loading the endpoint until the final packet is loaded,
 433	 * or the endpoint buffer is full.
 434	 */
 435 top:
 436	/*
 437	 * Clear interrupt status
 438	 *  - Packet Transmitted interrupt will become set again when the
 439	 *    host successfully takes another packet
 440	 */
 441	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 442	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 443		buf = req->req.buf + req->req.actual;
 444		prefetch(buf);
 445
 446		/* force pagesel */
 447		net2272_ep_read(ep, EP_STAT0);
 448
 449		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 450			(net2272_ep_read(ep, EP_AVAIL0));
 451
 452		if (max < ep->ep.maxpacket)
 453			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 454				| (net2272_ep_read(ep, EP_AVAIL0));
 455
 456		count = net2272_write_packet(ep, buf, req, max);
 457		/* see if we are done */
 458		if (req->req.length == req->req.actual) {
 459			/* validate short or zlp packet */
 460			if (count < ep->ep.maxpacket)
 461				set_fifo_bytecount(ep, 0);
 462			net2272_done(ep, req, 0);
 463
 464			if (!list_empty(&ep->queue)) {
 465				req = list_entry(ep->queue.next,
 466						struct net2272_request,
 467						queue);
 468				status = net2272_kick_dma(ep, req);
 469
 470				if (status < 0)
 471					if ((net2272_ep_read(ep, EP_STAT0)
 472							& (1 << BUFFER_EMPTY)))
 473						goto top;
 474			}
 475			return 1;
 476		}
 477		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 478	}
 479	return 0;
 480}
 481
 482static void
 483net2272_out_flush(struct net2272_ep *ep)
 484{
 485	ASSERT_OUT_NAKING(ep);
 486
 487	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 488			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 489	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 490}
 491
 492static int
 493net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 494	struct net2272_request *req, unsigned avail)
 495{
 496	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 497	unsigned is_short;
 498	u16 *bufp;
 499
 500	req->req.actual += avail;
 501
 502	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 503		ep->ep.name, req, avail,
 504		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 505
 506	is_short = (avail < ep->ep.maxpacket);
 507
 508	if (unlikely(avail == 0)) {
 509		/* remove any zlp from the buffer */
 510		(void)readw(ep_data);
 511		return is_short;
 512	}
 513
 514	/* Ensure we get the final byte */
 515	if (unlikely(avail % 2))
 516		avail++;
 517	bufp = (u16 *)buf;
 518
 519	do {
 520		*bufp++ = readw(ep_data);
 521		avail -= 2;
 522	} while (avail);
 523
 524	/*
 525	 * To avoid false endpoint available race condition must read
 526	 * ep stat0 twice in the case of a short transfer
 527	 */
 528	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 529		net2272_ep_read(ep, EP_STAT0);
 530
 531	return is_short;
 532}
 533
 534static int
 535net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 536{
 537	u8 *buf;
 538	unsigned is_short;
 539	int count;
 540	int tmp;
 541	int cleanup = 0;
 542
 543	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 544		ep->ep.name, req->req.actual, req->req.length);
 545
 546 top:
 547	do {
 548		buf = req->req.buf + req->req.actual;
 549		prefetchw(buf);
 550
 551		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 552			| net2272_ep_read(ep, EP_AVAIL0);
 553
 554		net2272_ep_write(ep, EP_STAT0,
 555			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 556			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 557
 558		tmp = req->req.length - req->req.actual;
 559
 560		if (count > tmp) {
 561			if ((tmp % ep->ep.maxpacket) != 0) {
 562				dev_err(ep->dev->dev,
 563					"%s out fifo %d bytes, expected %d\n",
 564					ep->ep.name, count, tmp);
 565				cleanup = 1;
 566			}
 567			count = (tmp > 0) ? tmp : 0;
 568		}
 569
 570		is_short = net2272_read_packet(ep, buf, req, count);
 571
 572		/* completion */
 573		if (unlikely(cleanup || is_short ||
 574				req->req.actual == req->req.length)) {
 575
 576			if (cleanup) {
 577				net2272_out_flush(ep);
 578				net2272_done(ep, req, -EOVERFLOW);
 579			} else
 580				net2272_done(ep, req, 0);
 581
 582			/* re-initialize endpoint transfer registers
 583			 * otherwise they may result in erroneous pre-validation
 584			 * for subsequent control reads
 585			 */
 586			if (unlikely(ep->num == 0)) {
 587				net2272_ep_write(ep, EP_TRANSFER2, 0);
 588				net2272_ep_write(ep, EP_TRANSFER1, 0);
 589				net2272_ep_write(ep, EP_TRANSFER0, 0);
 590			}
 591
 592			if (!list_empty(&ep->queue)) {
 593				int status;
 594
 595				req = list_entry(ep->queue.next,
 596					struct net2272_request, queue);
 597				status = net2272_kick_dma(ep, req);
 598				if ((status < 0) &&
 599				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 600					goto top;
 601			}
 602			return 1;
 603		}
 604	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 605
 606	return 0;
 607}
 608
 609static void
 610net2272_pio_advance(struct net2272_ep *ep)
 611{
 612	struct net2272_request *req;
 613
 614	if (unlikely(list_empty(&ep->queue)))
 615		return;
 616
 617	req = list_entry(ep->queue.next, struct net2272_request, queue);
 618	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 619}
 620
 621/* returns 0 on success, else negative errno */
 622static int
 623net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 624	unsigned len, unsigned dir)
 625{
 626	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 627		ep, buf, len, dir);
 628
 629	/* The NET2272 only supports a single dma channel */
 630	if (dev->dma_busy)
 631		return -EBUSY;
 632	/*
 633	 * EP_TRANSFER (used to determine the number of bytes received
 634	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 635	 */
 636	if ((dir == 1) && (len > 0x1000000))
 637		return -EINVAL;
 638
 639	dev->dma_busy = 1;
 640
 641	/* initialize platform's dma */
 642#ifdef CONFIG_USB_PCI
 643	/* NET2272 addr, buffer addr, length, etc. */
 644	switch (dev->dev_id) {
 645	case PCI_DEVICE_ID_RDK1:
 646		/* Setup PLX 9054 DMA mode */
 647		writel((1 << LOCAL_BUS_WIDTH) |
 648			(1 << TA_READY_INPUT_ENABLE) |
 649			(0 << LOCAL_BURST_ENABLE) |
 650			(1 << DONE_INTERRUPT_ENABLE) |
 651			(1 << LOCAL_ADDRESSING_MODE) |
 652			(1 << DEMAND_MODE) |
 653			(1 << DMA_EOT_ENABLE) |
 654			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 655			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 656			dev->rdk1.plx9054_base_addr + DMAMODE0);
 657
 658		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 659		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 660		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 661		writel((dir << DIRECTION_OF_TRANSFER) |
 662			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 663			dev->rdk1.plx9054_base_addr + DMADPR0);
 664		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 665			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 666			dev->rdk1.plx9054_base_addr + INTCSR);
 667
 668		break;
 669	}
 670#endif
 671
 672	net2272_write(dev, DMAREQ,
 673		(0 << DMA_BUFFER_VALID) |
 674		(1 << DMA_REQUEST_ENABLE) |
 675		(1 << DMA_CONTROL_DACK) |
 676		(dev->dma_eot_polarity << EOT_POLARITY) |
 677		(dev->dma_dack_polarity << DACK_POLARITY) |
 678		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 679		((ep >> 1) << DMA_ENDPOINT_SELECT));
 680
 681	(void) net2272_read(dev, SCRATCH);
 682
 683	return 0;
 684}
 685
 686static void
 687net2272_start_dma(struct net2272 *dev)
 688{
 689	/* start platform's dma controller */
 690#ifdef CONFIG_USB_PCI
 691	switch (dev->dev_id) {
 692	case PCI_DEVICE_ID_RDK1:
 693		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 694			dev->rdk1.plx9054_base_addr + DMACSR0);
 695		break;
 696	}
 697#endif
 698}
 699
 700/* returns 0 on success, else negative errno */
 701static int
 702net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 703{
 704	unsigned size;
 705	u8 tmp;
 706
 707	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 708		return -EINVAL;
 709
 710	/* don't use dma for odd-length transfers
 711	 * otherwise, we'd need to deal with the last byte with pio
 712	 */
 713	if (req->req.length & 1)
 714		return -EINVAL;
 715
 716	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 717		ep->ep.name, req, (unsigned long long) req->req.dma);
 718
 719	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 720
 721	/* The NET2272 can only use DMA on one endpoint at a time */
 722	if (ep->dev->dma_busy)
 723		return -EBUSY;
 724
 725	/* Make sure we only DMA an even number of bytes (we'll use
 726	 * pio to complete the transfer)
 727	 */
 728	size = req->req.length;
 729	size &= ~1;
 730
 731	/* device-to-host transfer */
 732	if (ep->is_in) {
 733		/* initialize platform's dma controller */
 734		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 735			/* unable to obtain DMA channel; return error and use pio mode */
 736			return -EBUSY;
 737		req->req.actual += size;
 738
 739	/* host-to-device transfer */
 740	} else {
 741		tmp = net2272_ep_read(ep, EP_STAT0);
 742
 743		/* initialize platform's dma controller */
 744		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 745			/* unable to obtain DMA channel; return error and use pio mode */
 746			return -EBUSY;
 747
 748		if (!(tmp & (1 << BUFFER_EMPTY)))
 749			ep->not_empty = 1;
 750		else
 751			ep->not_empty = 0;
 752
 753
 754		/* allow the endpoint's buffer to fill */
 755		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 756
 757		/* this transfer completed and data's already in the fifo
 758		 * return error so pio gets used.
 759		 */
 760		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 761
 762			/* deassert dreq */
 763			net2272_write(ep->dev, DMAREQ,
 764				(0 << DMA_BUFFER_VALID) |
 765				(0 << DMA_REQUEST_ENABLE) |
 766				(1 << DMA_CONTROL_DACK) |
 767				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 768				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 769				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 770				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 771
 772			return -EBUSY;
 773		}
 774	}
 775
 776	/* Don't use per-packet interrupts: use dma interrupts only */
 777	net2272_ep_write(ep, EP_IRQENB, 0);
 778
 779	net2272_start_dma(ep->dev);
 780
 781	return 0;
 782}
 783
 784static void net2272_cancel_dma(struct net2272 *dev)
 785{
 786#ifdef CONFIG_USB_PCI
 787	switch (dev->dev_id) {
 788	case PCI_DEVICE_ID_RDK1:
 789		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 790		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 791		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 792		         (1 << CHANNEL_DONE)))
 793			continue;	/* wait for dma to stabalize */
 794
 795		/* dma abort generates an interrupt */
 796		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 797			dev->rdk1.plx9054_base_addr + DMACSR0);
 798		break;
 799	}
 800#endif
 801
 802	dev->dma_busy = 0;
 803}
 804
 805/*---------------------------------------------------------------------------*/
 806
 807static int
 808net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 809{
 810	struct net2272_request *req;
 811	struct net2272_ep *ep;
 812	struct net2272 *dev;
 813	unsigned long flags;
 814	int status = -1;
 815	u8 s;
 816
 817	req = container_of(_req, struct net2272_request, req);
 818	if (!_req || !_req->complete || !_req->buf
 819			|| !list_empty(&req->queue))
 820		return -EINVAL;
 821	ep = container_of(_ep, struct net2272_ep, ep);
 822	if (!_ep || (!ep->desc && ep->num != 0))
 823		return -EINVAL;
 824	dev = ep->dev;
 825	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 826		return -ESHUTDOWN;
 827
 828	/* set up dma mapping in case the caller didn't */
 829	if (use_dma && ep->dma) {
 830		status = usb_gadget_map_request(&dev->gadget, _req,
 831				ep->is_in);
 832		if (status)
 833			return status;
 834	}
 835
 836	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 837		_ep->name, _req, _req->length, _req->buf,
 838		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 839
 840	spin_lock_irqsave(&dev->lock, flags);
 841
 842	_req->status = -EINPROGRESS;
 843	_req->actual = 0;
 844
 845	/* kickstart this i/o queue? */
 846	if (list_empty(&ep->queue) && !ep->stopped) {
 847		/* maybe there's no control data, just status ack */
 848		if (ep->num == 0 && _req->length == 0) {
 849			net2272_done(ep, req, 0);
 850			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 851			goto done;
 852		}
 853
 854		/* Return zlp, don't let it block subsequent packets */
 855		s = net2272_ep_read(ep, EP_STAT0);
 856		if (s & (1 << BUFFER_EMPTY)) {
 857			/* Buffer is empty check for a blocking zlp, handle it */
 858			if ((s & (1 << NAK_OUT_PACKETS)) &&
 859			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 860				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 861				/*
 862				 * Request is going to terminate with a short packet ...
 863				 * hope the client is ready for it!
 864				 */
 865				status = net2272_read_fifo(ep, req);
 866				/* clear short packet naking */
 867				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 868				goto done;
 869			}
 870		}
 871
 872		/* try dma first */
 873		status = net2272_kick_dma(ep, req);
 874
 875		if (status < 0) {
 876			/* dma failed (most likely in use by another endpoint)
 877			 * fallback to pio
 878			 */
 879			status = 0;
 880
 881			if (ep->is_in)
 882				status = net2272_write_fifo(ep, req);
 883			else {
 884				s = net2272_ep_read(ep, EP_STAT0);
 885				if ((s & (1 << BUFFER_EMPTY)) == 0)
 886					status = net2272_read_fifo(ep, req);
 887			}
 888
 889			if (unlikely(status != 0)) {
 890				if (status > 0)
 891					status = 0;
 892				req = NULL;
 893			}
 894		}
 895	}
 896	if (likely(req))
 897		list_add_tail(&req->queue, &ep->queue);
 898
 899	if (likely(!list_empty(&ep->queue)))
 900		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 901 done:
 902	spin_unlock_irqrestore(&dev->lock, flags);
 903
 904	return 0;
 905}
 906
 907/* dequeue ALL requests */
 908static void
 909net2272_dequeue_all(struct net2272_ep *ep)
 910{
 911	struct net2272_request *req;
 912
 913	/* called with spinlock held */
 914	ep->stopped = 1;
 915
 916	while (!list_empty(&ep->queue)) {
 917		req = list_entry(ep->queue.next,
 918				struct net2272_request,
 919				queue);
 920		net2272_done(ep, req, -ESHUTDOWN);
 921	}
 922}
 923
 924/* dequeue JUST ONE request */
 925static int
 926net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 927{
 928	struct net2272_ep *ep;
 929	struct net2272_request *req = NULL, *iter;
 930	unsigned long flags;
 931	int stopped;
 932
 933	ep = container_of(_ep, struct net2272_ep, ep);
 934	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 935		return -EINVAL;
 936
 937	spin_lock_irqsave(&ep->dev->lock, flags);
 938	stopped = ep->stopped;
 939	ep->stopped = 1;
 940
 941	/* make sure it's still queued on this endpoint */
 942	list_for_each_entry(iter, &ep->queue, queue) {
 943		if (&iter->req != _req)
 944			continue;
 945		req = iter;
 946		break;
 947	}
 948	if (!req) {
 949		ep->stopped = stopped;
 950		spin_unlock_irqrestore(&ep->dev->lock, flags);
 951		return -EINVAL;
 952	}
 953
 954	/* queue head may be partially complete */
 955	if (ep->queue.next == &req->queue) {
 956		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 957		net2272_done(ep, req, -ECONNRESET);
 958	}
 959	ep->stopped = stopped;
 960
 961	spin_unlock_irqrestore(&ep->dev->lock, flags);
 962	return 0;
 963}
 964
 965/*---------------------------------------------------------------------------*/
 966
 967static int
 968net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 969{
 970	struct net2272_ep *ep;
 971	unsigned long flags;
 972	int ret = 0;
 973
 974	ep = container_of(_ep, struct net2272_ep, ep);
 975	if (!_ep || (!ep->desc && ep->num != 0))
 976		return -EINVAL;
 977	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 978		return -ESHUTDOWN;
 979	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 980		return -EINVAL;
 981
 982	spin_lock_irqsave(&ep->dev->lock, flags);
 983	if (!list_empty(&ep->queue))
 984		ret = -EAGAIN;
 985	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
 986		ret = -EAGAIN;
 987	else {
 988		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
 989			value ? "set" : "clear",
 990			wedged ? "wedge" : "halt");
 991		/* set/clear */
 992		if (value) {
 993			if (ep->num == 0)
 994				ep->dev->protocol_stall = 1;
 995			else
 996				set_halt(ep);
 997			if (wedged)
 998				ep->wedged = 1;
 999		} else {
1000			clear_halt(ep);
1001			ep->wedged = 0;
1002		}
1003	}
1004	spin_unlock_irqrestore(&ep->dev->lock, flags);
1005
1006	return ret;
1007}
1008
1009static int
1010net2272_set_halt(struct usb_ep *_ep, int value)
1011{
1012	return net2272_set_halt_and_wedge(_ep, value, 0);
1013}
1014
1015static int
1016net2272_set_wedge(struct usb_ep *_ep)
1017{
1018	if (!_ep || _ep->name == ep0name)
1019		return -EINVAL;
1020	return net2272_set_halt_and_wedge(_ep, 1, 1);
1021}
1022
1023static int
1024net2272_fifo_status(struct usb_ep *_ep)
1025{
1026	struct net2272_ep *ep;
1027	u16 avail;
1028
1029	ep = container_of(_ep, struct net2272_ep, ep);
1030	if (!_ep || (!ep->desc && ep->num != 0))
1031		return -ENODEV;
1032	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1033		return -ESHUTDOWN;
1034
1035	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1036	avail |= net2272_ep_read(ep, EP_AVAIL0);
1037	if (avail > ep->fifo_size)
1038		return -EOVERFLOW;
1039	if (ep->is_in)
1040		avail = ep->fifo_size - avail;
1041	return avail;
1042}
1043
1044static void
1045net2272_fifo_flush(struct usb_ep *_ep)
1046{
1047	struct net2272_ep *ep;
1048
1049	ep = container_of(_ep, struct net2272_ep, ep);
1050	if (!_ep || (!ep->desc && ep->num != 0))
1051		return;
1052	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1053		return;
1054
1055	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1056}
1057
1058static const struct usb_ep_ops net2272_ep_ops = {
1059	.enable        = net2272_enable,
1060	.disable       = net2272_disable,
1061
1062	.alloc_request = net2272_alloc_request,
1063	.free_request  = net2272_free_request,
1064
1065	.queue         = net2272_queue,
1066	.dequeue       = net2272_dequeue,
1067
1068	.set_halt      = net2272_set_halt,
1069	.set_wedge     = net2272_set_wedge,
1070	.fifo_status   = net2272_fifo_status,
1071	.fifo_flush    = net2272_fifo_flush,
1072};
1073
1074/*---------------------------------------------------------------------------*/
1075
1076static int
1077net2272_get_frame(struct usb_gadget *_gadget)
1078{
1079	struct net2272 *dev;
1080	unsigned long flags;
1081	u16 ret;
1082
1083	if (!_gadget)
1084		return -ENODEV;
1085	dev = container_of(_gadget, struct net2272, gadget);
1086	spin_lock_irqsave(&dev->lock, flags);
1087
1088	ret = net2272_read(dev, FRAME1) << 8;
1089	ret |= net2272_read(dev, FRAME0);
1090
1091	spin_unlock_irqrestore(&dev->lock, flags);
1092	return ret;
1093}
1094
1095static int
1096net2272_wakeup(struct usb_gadget *_gadget)
1097{
1098	struct net2272 *dev;
1099	u8 tmp;
1100	unsigned long flags;
1101
1102	if (!_gadget)
1103		return 0;
1104	dev = container_of(_gadget, struct net2272, gadget);
1105
1106	spin_lock_irqsave(&dev->lock, flags);
1107	tmp = net2272_read(dev, USBCTL0);
1108	if (tmp & (1 << IO_WAKEUP_ENABLE))
1109		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1110
1111	spin_unlock_irqrestore(&dev->lock, flags);
1112
1113	return 0;
1114}
1115
1116static int
1117net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1118{
1119	if (!_gadget)
1120		return -ENODEV;
1121
1122	_gadget->is_selfpowered = (value != 0);
1123
1124	return 0;
1125}
1126
1127static int
1128net2272_pullup(struct usb_gadget *_gadget, int is_on)
1129{
1130	struct net2272 *dev;
1131	u8 tmp;
1132	unsigned long flags;
1133
1134	if (!_gadget)
1135		return -ENODEV;
1136	dev = container_of(_gadget, struct net2272, gadget);
1137
1138	spin_lock_irqsave(&dev->lock, flags);
1139	tmp = net2272_read(dev, USBCTL0);
1140	dev->softconnect = (is_on != 0);
1141	if (is_on)
1142		tmp |= (1 << USB_DETECT_ENABLE);
1143	else
1144		tmp &= ~(1 << USB_DETECT_ENABLE);
1145	net2272_write(dev, USBCTL0, tmp);
1146	spin_unlock_irqrestore(&dev->lock, flags);
1147
1148	return 0;
1149}
1150
1151static int net2272_start(struct usb_gadget *_gadget,
1152		struct usb_gadget_driver *driver);
1153static int net2272_stop(struct usb_gadget *_gadget);
1154static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
1155
1156static const struct usb_gadget_ops net2272_ops = {
1157	.get_frame	= net2272_get_frame,
1158	.wakeup		= net2272_wakeup,
1159	.set_selfpowered = net2272_set_selfpowered,
1160	.pullup		= net2272_pullup,
1161	.udc_start	= net2272_start,
1162	.udc_stop	= net2272_stop,
1163	.udc_async_callbacks = net2272_async_callbacks,
1164};
1165
1166/*---------------------------------------------------------------------------*/
1167
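/*
 * sysfs register dump: walks the main control, DMA, USB and per-endpoint
 * register sets under dev->lock.  DEVICE_ATTR_RO(registers) below turns
 * this into the read-only "registers" attribute created during probe.
 */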
1168static ssize_t
1169registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1170{
1171	struct net2272 *dev;
1172	char *next;
1173	unsigned size, t;
1174	unsigned long flags;
1175	u8 t1, t2;
1176	int i;
1177	const char *s;
1178
1179	dev = dev_get_drvdata(_dev);
1180	next = buf;
1181	size = PAGE_SIZE;
1182	spin_lock_irqsave(&dev->lock, flags);
1183
1184	/* Main Control Registers */
 1185	t = scnprintf(next, size, "%s version %s, "
1186		"chiprev %02x, locctl %02x\n"
1187		"irqenb0 %02x irqenb1 %02x "
1188		"irqstat0 %02x irqstat1 %02x\n",
1189		driver_name, driver_vers, dev->chiprev,
1190		net2272_read(dev, LOCCTL),
1191		net2272_read(dev, IRQENB0),
1192		net2272_read(dev, IRQENB1),
1193		net2272_read(dev, IRQSTAT0),
1194		net2272_read(dev, IRQSTAT1));
1195	size -= t;
1196	next += t;
1197
1198	/* DMA */
1199	t1 = net2272_read(dev, DMAREQ);
1200	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1201		t1, ep_name[(t1 & 0x01) + 1],
1202		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1203		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1204		t1 & (1 << DMA_REQUEST) ? "req " : "",
1205		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1206	size -= t;
1207	next += t;
1208
1209	/* USB Control Registers */
1210	t1 = net2272_read(dev, USBCTL1);
1211	if (t1 & (1 << VBUS_PIN)) {
1212		if (t1 & (1 << USB_HIGH_SPEED))
1213			s = "high speed";
1214		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1215			s = "powered";
1216		else
1217			s = "full speed";
1218	} else
1219		s = "not attached";
1220	t = scnprintf(next, size,
1221		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1222		net2272_read(dev, USBCTL0), t1,
1223		net2272_read(dev, OURADDR), s);
1224	size -= t;
1225	next += t;
1226
1227	/* Endpoint Registers */
1228	for (i = 0; i < 4; ++i) {
1229		struct net2272_ep *ep;
1230
1231		ep = &dev->ep[i];
1232		if (i && !ep->desc)
1233			continue;
1234
1235		t1 = net2272_ep_read(ep, EP_CFG);
1236		t2 = net2272_ep_read(ep, EP_RSPSET);
1237		t = scnprintf(next, size,
1238			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1239			"irqenb %02x\n",
1240			ep->ep.name, t1, t2,
1241			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1242			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1243			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1244			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1245			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1246			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1247			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1248			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1249			net2272_ep_read(ep, EP_IRQENB));
1250		size -= t;
1251		next += t;
1252
1253		t = scnprintf(next, size,
1254			"\tstat0 %02x stat1 %02x avail %04x "
1255			"(ep%d%s-%s)%s\n",
1256			net2272_ep_read(ep, EP_STAT0),
1257			net2272_ep_read(ep, EP_STAT1),
1258			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1259			t1 & 0x0f,
1260			ep->is_in ? "in" : "out",
1261			type_string(t1 >> 5),
1262			ep->stopped ? "*" : "");
1263		size -= t;
1264		next += t;
1265
1266		t = scnprintf(next, size,
1267			"\tep_transfer %06x\n",
1268			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1269			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1270			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1271		size -= t;
1272		next += t;
1273
1274		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1275		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1276		t = scnprintf(next, size,
1277			"\tbuf-a %s buf-b %s\n",
1278			buf_state_string(t1),
1279			buf_state_string(t2));
1280		size -= t;
1281		next += t;
1282	}
1283
1284	spin_unlock_irqrestore(&dev->lock, flags);
1285
1286	return PAGE_SIZE - size;
1287}
1288static DEVICE_ATTR_RO(registers);
1289
1290/*---------------------------------------------------------------------------*/
1291
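/*
 * fifo_mode occupies the top two bits of LOCCTL.  Modes 0-2 keep ep-b on the
 * gadget's ep_list with the FIFO sizes programmed below; mode 3 leaves ep-b
 * off the list entirely.  ep-a is always listed and ep-c is always 512 bytes.
 */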
1292static void
1293net2272_set_fifo_mode(struct net2272 *dev, int mode)
1294{
1295	u8 tmp;
1296
1297	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1298	tmp |= (mode << 6);
1299	net2272_write(dev, LOCCTL, tmp);
1300
1301	INIT_LIST_HEAD(&dev->gadget.ep_list);
1302
1303	/* always ep-a, ep-c ... maybe not ep-b */
1304	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1305
1306	switch (mode) {
1307	case 0:
1308		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1309		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1310		break;
1311	case 1:
1312		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1313		dev->ep[1].fifo_size = 1024;
1314		dev->ep[2].fifo_size = 512;
1315		break;
1316	case 2:
1317		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1318		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1319		break;
1320	case 3:
1321		dev->ep[1].fifo_size = 1024;
1322		break;
1323	}
1324
1325	/* ep-c is always 2 512 byte buffers */
1326	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1327	dev->ep[3].fifo_size = 512;
1328}
1329
1330/*---------------------------------------------------------------------------*/
1331
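/*
 * Return the chip to a quiescent state: mask and acknowledge both interrupt
 * banks, park the DMA request logic, and reapply the FIFO layout and
 * local-bus settings (16-bit data width, configured DMA mode).
 */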
1332static void
1333net2272_usb_reset(struct net2272 *dev)
1334{
1335	dev->gadget.speed = USB_SPEED_UNKNOWN;
1336
1337	net2272_cancel_dma(dev);
1338
1339	net2272_write(dev, IRQENB0, 0);
1340	net2272_write(dev, IRQENB1, 0);
1341
1342	/* clear irq state */
1343	net2272_write(dev, IRQSTAT0, 0xff);
1344	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1345
1346	net2272_write(dev, DMAREQ,
1347		(0 << DMA_BUFFER_VALID) |
1348		(0 << DMA_REQUEST_ENABLE) |
1349		(1 << DMA_CONTROL_DACK) |
1350		(dev->dma_eot_polarity << EOT_POLARITY) |
1351		(dev->dma_dack_polarity << DACK_POLARITY) |
1352		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1353		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1354
1355	net2272_cancel_dma(dev);
1356	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1357
 1358	/* Set the NET2272 ep fifo data width to 16-bit mode.  For correct byte swapping,
 1359	 * note that the higher-level gadget drivers are expected to convert data to little endian;
 1360	 * enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here.
 1361	 */
1362	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1363	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1364}
1365
1366static void
1367net2272_usb_reinit(struct net2272 *dev)
1368{
1369	int i;
1370
1371	/* basic endpoint init */
1372	for (i = 0; i < 4; ++i) {
1373		struct net2272_ep *ep = &dev->ep[i];
1374
1375		ep->ep.name = ep_name[i];
1376		ep->dev = dev;
1377		ep->num = i;
1378		ep->not_empty = 0;
1379
1380		if (use_dma && ep->num == dma_ep)
1381			ep->dma = 1;
1382
1383		if (i > 0 && i <= 3)
1384			ep->fifo_size = 512;
1385		else
1386			ep->fifo_size = 64;
1387		net2272_ep_reset(ep);
1388
1389		if (i == 0) {
1390			ep->ep.caps.type_control = true;
1391		} else {
1392			ep->ep.caps.type_iso = true;
1393			ep->ep.caps.type_bulk = true;
1394			ep->ep.caps.type_int = true;
1395		}
1396
1397		ep->ep.caps.dir_in = true;
1398		ep->ep.caps.dir_out = true;
1399	}
1400	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1401
1402	dev->gadget.ep0 = &dev->ep[0].ep;
1403	dev->ep[0].stopped = 0;
1404	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1405}
1406
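/*
 * Arm ep0 and the chip-level interrupt enables needed for enumeration:
 * NAK-out handling on ep0, soft-connect according to dev->softconnect,
 * and the setup/ep0/DMA-done plus VBUS/root-reset/suspend-change interrupts.
 */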
1407static void
1408net2272_ep0_start(struct net2272 *dev)
1409{
1410	struct net2272_ep *ep0 = &dev->ep[0];
1411
1412	net2272_ep_write(ep0, EP_RSPSET,
1413		(1 << NAK_OUT_PACKETS_MODE) |
1414		(1 << ALT_NAK_OUT_PACKETS));
1415	net2272_ep_write(ep0, EP_RSPCLR,
1416		(1 << HIDE_STATUS_PHASE) |
1417		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1418	net2272_write(dev, USBCTL0,
1419		(dev->softconnect << USB_DETECT_ENABLE) |
1420		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1421		(1 << IO_WAKEUP_ENABLE));
1422	net2272_write(dev, IRQENB0,
1423		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1424		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1425		(1 << DMA_DONE_INTERRUPT_ENABLE));
1426	net2272_write(dev, IRQENB1,
1427		(1 << VBUS_INTERRUPT_ENABLE) |
1428		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1429		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1430}
1431
1432/* when a driver is successfully registered, it will receive
1433 * control requests including set_configuration(), which enables
1434 * non-control requests.  then usb traffic follows until a
1435 * disconnect is reported.  then a host may connect again, or
1436 * the driver might get unbound.
1437 */
1438static int net2272_start(struct usb_gadget *_gadget,
1439		struct usb_gadget_driver *driver)
1440{
1441	struct net2272 *dev;
1442	unsigned i;
1443
1444	if (!driver || !driver->setup ||
1445	    driver->max_speed != USB_SPEED_HIGH)
1446		return -EINVAL;
1447
1448	dev = container_of(_gadget, struct net2272, gadget);
1449
1450	for (i = 0; i < 4; ++i)
1451		dev->ep[i].irqs = 0;
1452	/* hook up the driver ... */
1453	dev->softconnect = 1;
1454	dev->driver = driver;
1455
1456	/* ... then enable host detection and ep0; and we're ready
1457	 * for set_configuration as well as eventual disconnect.
1458	 */
1459	net2272_ep0_start(dev);
1460
1461	return 0;
1462}
1463
1464static void
1465stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1466{
1467	int i;
1468
1469	/* don't disconnect if it's not connected */
1470	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1471		driver = NULL;
1472
1473	/* stop hardware; prevent new request submissions;
1474	 * and kill any outstanding requests.
1475	 */
1476	net2272_usb_reset(dev);
1477	for (i = 0; i < 4; ++i)
1478		net2272_dequeue_all(&dev->ep[i]);
1479
1480	/* report disconnect; the driver is already quiesced */
1481	if (dev->async_callbacks && driver) {
1482		spin_unlock(&dev->lock);
1483		driver->disconnect(&dev->gadget);
1484		spin_lock(&dev->lock);
1485	}
1486
1487	net2272_usb_reinit(dev);
1488}
1489
1490static int net2272_stop(struct usb_gadget *_gadget)
1491{
1492	struct net2272 *dev;
1493	unsigned long flags;
1494
1495	dev = container_of(_gadget, struct net2272, gadget);
1496
1497	spin_lock_irqsave(&dev->lock, flags);
1498	stop_activity(dev, NULL);
1499	spin_unlock_irqrestore(&dev->lock, flags);
1500
1501	dev->driver = NULL;
1502
1503	return 0;
1504}
1505
1506static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
1507{
1508	struct net2272	*dev = container_of(_gadget, struct net2272, gadget);
1509
1510	spin_lock_irq(&dev->lock);
1511	dev->async_callbacks = enable;
1512	spin_unlock_irq(&dev->lock);
1513}
1514
1515/*---------------------------------------------------------------------------*/
1516/* handle ep-a/ep-b dma completions */
1517static void
1518net2272_handle_dma(struct net2272_ep *ep)
1519{
1520	struct net2272_request *req;
1521	unsigned len;
1522	int status;
1523
1524	if (!list_empty(&ep->queue))
1525		req = list_entry(ep->queue.next,
1526				struct net2272_request, queue);
1527	else
1528		req = NULL;
1529
1530	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1531
1532	/* Ensure DREQ is de-asserted */
1533	net2272_write(ep->dev, DMAREQ,
1534		(0 << DMA_BUFFER_VALID)
1535	      | (0 << DMA_REQUEST_ENABLE)
1536	      | (1 << DMA_CONTROL_DACK)
1537	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1538	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1539	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1540	      | (ep->dma << DMA_ENDPOINT_SELECT));
1541
1542	ep->dev->dma_busy = 0;
1543
1544	net2272_ep_write(ep, EP_IRQENB,
1545		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1546		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1547		| net2272_ep_read(ep, EP_IRQENB));
1548
1549	/* device-to-host transfer completed */
1550	if (ep->is_in) {
1551		/* validate a short packet or zlp if necessary */
1552		if ((req->req.length % ep->ep.maxpacket != 0) ||
1553				req->req.zero)
1554			set_fifo_bytecount(ep, 0);
1555
1556		net2272_done(ep, req, 0);
1557		if (!list_empty(&ep->queue)) {
1558			req = list_entry(ep->queue.next,
1559					struct net2272_request, queue);
1560			status = net2272_kick_dma(ep, req);
1561			if (status < 0)
1562				net2272_pio_advance(ep);
1563		}
1564
1565	/* host-to-device transfer completed */
1566	} else {
1567		/* terminated with a short packet? */
1568		if (net2272_read(ep->dev, IRQSTAT0) &
1569				(1 << DMA_DONE_INTERRUPT)) {
1570			/* abort system dma */
1571			net2272_cancel_dma(ep->dev);
1572		}
1573
1574		/* EP_TRANSFER will contain the number of bytes
1575		 * actually received.
1576		 * NOTE: There is no overflow detection on EP_TRANSFER:
1577		 * We can't deal with transfers larger than 2^24 bytes!
1578		 */
1579		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1580			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1581			| (net2272_ep_read(ep, EP_TRANSFER0));
1582
1583		if (ep->not_empty)
1584			len += 4;
1585
1586		req->req.actual += len;
1587
1588		/* get any remaining data */
1589		net2272_pio_advance(ep);
1590	}
1591}
1592
1593/*---------------------------------------------------------------------------*/
1594
1595static void
1596net2272_handle_ep(struct net2272_ep *ep)
1597{
1598	struct net2272_request *req;
1599	u8 stat0, stat1;
1600
1601	if (!list_empty(&ep->queue))
1602		req = list_entry(ep->queue.next,
1603			struct net2272_request, queue);
1604	else
1605		req = NULL;
1606
1607	/* ack all, and handle what we care about */
1608	stat0 = net2272_ep_read(ep, EP_STAT0);
1609	stat1 = net2272_ep_read(ep, EP_STAT1);
1610	ep->irqs++;
1611
1612	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1613		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1614
1615	net2272_ep_write(ep, EP_STAT0, stat0 &
1616		~((1 << NAK_OUT_PACKETS)
1617		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1618	net2272_ep_write(ep, EP_STAT1, stat1);
1619
1620	/* data packet(s) received (in the fifo, OUT)
1621	 * direction must be validated, otherwise control read status phase
1622	 * could be interpreted as a valid packet
1623	 */
1624	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1625		net2272_pio_advance(ep);
1626	/* data packet(s) transmitted (IN) */
1627	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1628		net2272_pio_advance(ep);
1629}
1630
1631static struct net2272_ep *
1632net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1633{
1634	struct net2272_ep *ep;
1635
1636	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1637		return &dev->ep[0];
1638
1639	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1640		u8 bEndpointAddress;
1641
1642		if (!ep->desc)
1643			continue;
1644		bEndpointAddress = ep->desc->bEndpointAddress;
1645		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1646			continue;
1647		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1648			return ep;
1649	}
1650	return NULL;
1651}
1652
1653/*
1654 * USB Test Packet:
1655 * JKJKJKJK * 9
1656 * JJKKJJKK * 8
1657 * JJJJKKKK * 8
1658 * JJJJJJJKKKKKKK * 8
1659 * JJJJJJJK * 8
1660 * {JKKKKKKK * 10}, JK
1661 */
1662static const u8 net2272_test_packet[] = {
1663	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1664	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1665	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1666	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1667	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1668	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1669};
1670
1671static void
1672net2272_set_test_mode(struct net2272 *dev, int mode)
1673{
1674	int i;
1675
1676	/* Disable all net2272 interrupts:
1677	 * Nothing but a power cycle should stop the test.
1678	 */
1679	net2272_write(dev, IRQENB0, 0x00);
1680	net2272_write(dev, IRQENB1, 0x00);
1681
 1682	/* Force transceiver to high-speed */
1683	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1684
1685	net2272_write(dev, PAGESEL, 0);
1686	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1687	net2272_write(dev, EP_RSPCLR,
1688			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1689			| (1 << HIDE_STATUS_PHASE));
1690	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1691	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1692
1693	/* wait for status phase to complete */
1694	while (!(net2272_read(dev, EP_STAT0) &
1695				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1696		;
1697
1698	/* Enable test mode */
1699	net2272_write(dev, USBTEST, mode);
1700
1701	/* load test packet */
1702	if (mode == USB_TEST_PACKET) {
1703		/* switch to 8 bit mode */
1704		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1705				~(1 << DATA_WIDTH));
1706
1707		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1708			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1709
1710		/* Validate test packet */
1711		net2272_write(dev, EP_TRANSFER0, 0);
1712	}
1713}
1714
1715static void
1716net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1717{
1718	struct net2272_ep *ep;
1719	u8 num, scratch;
1720
1721	/* starting a control request? */
1722	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1723		union {
1724			u8 raw[8];
1725			struct usb_ctrlrequest	r;
1726		} u;
1727		int tmp = 0;
1728		struct net2272_request *req;
1729
1730		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1731			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1732				dev->gadget.speed = USB_SPEED_HIGH;
1733			else
1734				dev->gadget.speed = USB_SPEED_FULL;
1735			dev_dbg(dev->dev, "%s\n",
1736				usb_speed_string(dev->gadget.speed));
1737		}
1738
1739		ep = &dev->ep[0];
1740		ep->irqs++;
1741
1742		/* make sure any leftover interrupt state is cleared */
1743		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1744		while (!list_empty(&ep->queue)) {
1745			req = list_entry(ep->queue.next,
1746				struct net2272_request, queue);
1747			net2272_done(ep, req,
1748				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1749		}
1750		ep->stopped = 0;
1751		dev->protocol_stall = 0;
1752		net2272_ep_write(ep, EP_STAT0,
1753			    (1 << DATA_IN_TOKEN_INTERRUPT)
1754			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1755			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1756			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1757			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1758		net2272_ep_write(ep, EP_STAT1,
1759			    (1 << TIMEOUT)
1760			  | (1 << USB_OUT_ACK_SENT)
1761			  | (1 << USB_OUT_NAK_SENT)
1762			  | (1 << USB_IN_ACK_RCVD)
1763			  | (1 << USB_IN_NAK_SENT)
1764			  | (1 << USB_STALL_SENT)
1765			  | (1 << LOCAL_OUT_ZLP));
1766
1767		/*
1768		 * Ensure Control Read pre-validation setting is beyond maximum size
1769		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1770		 *    an EP0 transfer following the Control Write is a Control Read,
1771		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1772		 *    pre-validation count.
1773		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
 1774	 *    the pre-validation count cannot cause an unexpected validation
1775		 */
1776		net2272_write(dev, PAGESEL, 0);
1777		net2272_write(dev, EP_TRANSFER2, 0xff);
1778		net2272_write(dev, EP_TRANSFER1, 0xff);
1779		net2272_write(dev, EP_TRANSFER0, 0xff);
1780
1781		u.raw[0] = net2272_read(dev, SETUP0);
1782		u.raw[1] = net2272_read(dev, SETUP1);
1783		u.raw[2] = net2272_read(dev, SETUP2);
1784		u.raw[3] = net2272_read(dev, SETUP3);
1785		u.raw[4] = net2272_read(dev, SETUP4);
1786		u.raw[5] = net2272_read(dev, SETUP5);
1787		u.raw[6] = net2272_read(dev, SETUP6);
1788		u.raw[7] = net2272_read(dev, SETUP7);
1789		/*
1790		 * If you have a big endian cpu make sure le16_to_cpus
1791		 * performs the proper byte swapping here...
1792		 */
1793		le16_to_cpus(&u.r.wValue);
1794		le16_to_cpus(&u.r.wIndex);
1795		le16_to_cpus(&u.r.wLength);
1796
1797		/* ack the irq */
1798		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1799		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1800
1801		/* watch control traffic at the token level, and force
1802		 * synchronization before letting the status phase happen.
1803		 */
1804		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1805		if (ep->is_in) {
1806			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1807				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1808				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1809			stop_out_naking(ep);
1810		} else
1811			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1812				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814		net2272_ep_write(ep, EP_IRQENB, scratch);
1815
1816		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1817			goto delegate;
1818		switch (u.r.bRequest) {
1819		case USB_REQ_GET_STATUS: {
1820			struct net2272_ep *e;
1821			u16 status = 0;
1822
1823			switch (u.r.bRequestType & USB_RECIP_MASK) {
1824			case USB_RECIP_ENDPOINT:
1825				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1826				if (!e || u.r.wLength > 2)
1827					goto do_stall;
1828				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1829					status = cpu_to_le16(1);
1830				else
1831					status = cpu_to_le16(0);
1832
1833				/* don't bother with a request object! */
1834				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1835				writew(status, net2272_reg_addr(dev, EP_DATA));
1836				set_fifo_bytecount(&dev->ep[0], 0);
1837				allow_status(ep);
1838				dev_vdbg(dev->dev, "%s stat %02x\n",
1839					ep->ep.name, status);
1840				goto next_endpoints;
1841			case USB_RECIP_DEVICE:
1842				if (u.r.wLength > 2)
1843					goto do_stall;
1844				if (dev->gadget.is_selfpowered)
1845					status = (1 << USB_DEVICE_SELF_POWERED);
1846
1847				/* don't bother with a request object! */
1848				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849				writew(status, net2272_reg_addr(dev, EP_DATA));
1850				set_fifo_bytecount(&dev->ep[0], 0);
1851				allow_status(ep);
1852				dev_vdbg(dev->dev, "device stat %02x\n", status);
1853				goto next_endpoints;
1854			case USB_RECIP_INTERFACE:
1855				if (u.r.wLength > 2)
1856					goto do_stall;
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "interface status %02x\n", status);
1864				goto next_endpoints;
1865			}
1866
1867			break;
1868		}
1869		case USB_REQ_CLEAR_FEATURE: {
1870			struct net2272_ep *e;
1871
1872			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1873				goto delegate;
1874			if (u.r.wValue != USB_ENDPOINT_HALT ||
1875			    u.r.wLength != 0)
1876				goto do_stall;
1877			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1878			if (!e)
1879				goto do_stall;
1880			if (e->wedged) {
1881				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
 1882					e->ep.name);
1883			} else {
 1884				dev_vdbg(dev->dev, "%s clear halt\n", e->ep.name);
1885				clear_halt(e);
1886			}
1887			allow_status(ep);
1888			goto next_endpoints;
1889		}
1890		case USB_REQ_SET_FEATURE: {
1891			struct net2272_ep *e;
1892
1893			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1894				if (u.r.wIndex != NORMAL_OPERATION)
1895					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1896				allow_status(ep);
1897				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1898				goto next_endpoints;
1899			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1900				goto delegate;
1901			if (u.r.wValue != USB_ENDPOINT_HALT ||
1902			    u.r.wLength != 0)
1903				goto do_stall;
1904			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1905			if (!e)
1906				goto do_stall;
1907			set_halt(e);
1908			allow_status(ep);
 1909			dev_vdbg(dev->dev, "%s set halt\n", e->ep.name);
1910			goto next_endpoints;
1911		}
1912		case USB_REQ_SET_ADDRESS: {
1913			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1914			allow_status(ep);
1915			break;
1916		}
1917		default:
1918 delegate:
1919			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1920				"ep_cfg %08x\n",
1921				u.r.bRequestType, u.r.bRequest,
1922				u.r.wValue, u.r.wIndex,
1923				net2272_ep_read(ep, EP_CFG));
1924			if (dev->async_callbacks) {
1925				spin_unlock(&dev->lock);
1926				tmp = dev->driver->setup(&dev->gadget, &u.r);
1927				spin_lock(&dev->lock);
1928			}
1929		}
1930
1931		/* stall ep0 on error */
1932		if (tmp < 0) {
1933 do_stall:
1934			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1935				u.r.bRequestType, u.r.bRequest, tmp);
1936			dev->protocol_stall = 1;
1937		}
1938	/* endpoint dma irq? */
1939	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1940		net2272_cancel_dma(dev);
1941		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1942		stat &= ~(1 << DMA_DONE_INTERRUPT);
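		/* DMAREQ's endpoint-select bit picks the completing endpoint:
		 * clear means ep-a (dev->ep[1]), set means ep-b (dev->ep[2]);
		 * cf. the ep_name indexing in registers_show().
		 */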
1943		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1944			? 2 : 1;
1945
1946		ep = &dev->ep[num];
1947		net2272_handle_dma(ep);
1948	}
1949
1950 next_endpoints:
1951	/* endpoint data irq? */
1952	scratch = stat & 0x0f;
1953	stat &= ~0x0f;
1954	for (num = 0; scratch; num++) {
1955		u8 t;
1956
1957		/* does this endpoint's FIFO and queue need tending? */
1958		t = 1 << num;
1959		if ((scratch & t) == 0)
1960			continue;
1961		scratch ^= t;
1962
1963		ep = &dev->ep[num];
1964		net2272_handle_ep(ep);
1965	}
1966
1967	/* some interrupts we can just ignore */
1968	stat &= ~(1 << SOF_INTERRUPT);
1969
1970	if (stat)
1971		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1972}
1973
1974static void
1975net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1976{
1977	u8 tmp, mask;
1978
1979	/* after disconnect there's nothing else to do! */
1980	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1981	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1982
1983	if (stat & tmp) {
1984		bool	reset = false;
1985		bool	disconnect = false;
1986
1987		/*
1988		 * Ignore disconnects and resets if the speed hasn't been set.
1989		 * VBUS can bounce and there's always an initial reset.
1990		 */
1991		net2272_write(dev, IRQSTAT1, tmp);
1992		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1993			if ((stat & (1 << VBUS_INTERRUPT)) &&
1994					(net2272_read(dev, USBCTL1) &
1995						(1 << VBUS_PIN)) == 0) {
1996				disconnect = true;
1997				dev_dbg(dev->dev, "disconnect %s\n",
1998					dev->driver->driver.name);
1999			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2000					(net2272_read(dev, USBCTL1) & mask)
2001						== 0) {
2002				reset = true;
2003				dev_dbg(dev->dev, "reset %s\n",
2004					dev->driver->driver.name);
2005			}
2006
2007			if (disconnect || reset) {
2008				stop_activity(dev, dev->driver);
2009				net2272_ep0_start(dev);
2010				if (dev->async_callbacks) {
2011					spin_unlock(&dev->lock);
2012					if (reset)
2013						usb_gadget_udc_reset(&dev->gadget, dev->driver);
2014					else
2015						(dev->driver->disconnect)(&dev->gadget);
2016					spin_lock(&dev->lock);
2017				}
2018				return;
2019			}
2020		}
2021		stat &= ~tmp;
2022
2023		if (!stat)
2024			return;
2025	}
2026
2027	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2028	if (stat & tmp) {
2029		net2272_write(dev, IRQSTAT1, tmp);
2030		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2031			if (dev->async_callbacks && dev->driver->suspend)
2032				dev->driver->suspend(&dev->gadget);
2033			if (!enable_suspend) {
2034				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2035				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2036			}
2037		} else {
2038			if (dev->async_callbacks && dev->driver->resume)
2039				dev->driver->resume(&dev->gadget);
2040		}
2041		stat &= ~tmp;
2042	}
2043
2044	/* clear any other status/irqs */
2045	if (stat)
2046		net2272_write(dev, IRQSTAT1, stat);
2047
2048	/* some status we can just ignore */
2049	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2050			| (1 << SUSPEND_REQUEST_INTERRUPT)
2051			| (1 << RESUME_INTERRUPT));
2052	if (!stat)
2053		return;
2054	else
2055		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2056}
2057
2058static irqreturn_t net2272_irq(int irq, void *_dev)
2059{
2060	struct net2272 *dev = _dev;
2061#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2062	u32 intcsr;
2063#endif
2064#if defined(PLX_PCI_RDK)
2065	u8 dmareq;
2066#endif
2067	spin_lock(&dev->lock);
2068#if defined(PLX_PCI_RDK)
2069	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2070
2071	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2072		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2073				dev->rdk1.plx9054_base_addr + INTCSR);
2074		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2075		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2076		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2077		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2078			dev->rdk1.plx9054_base_addr + INTCSR);
2079	}
2080	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2081		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2082				dev->rdk1.plx9054_base_addr + DMACSR0);
2083
2084		dmareq = net2272_read(dev, DMAREQ);
2085		if (dmareq & 0x01)
2086			net2272_handle_dma(&dev->ep[2]);
2087		else
2088			net2272_handle_dma(&dev->ep[1]);
2089	}
2090#endif
2091#if defined(PLX_PCI_RDK2)
2092	/* see if PCI int for us by checking irqstat */
2093	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2094	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2095		spin_unlock(&dev->lock);
2096		return IRQ_NONE;
2097	}
2098	/* check dma interrupts */
2099#endif
2100	/* Platform/device interrupt handler */
2101#if !defined(PLX_PCI_RDK)
2102	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2103	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2104#endif
2105	spin_unlock(&dev->lock);
2106
2107	return IRQ_HANDLED;
2108}
2109
2110static int net2272_present(struct net2272 *dev)
2111{
2112	/*
2113	 * Quick test to see if CPU can communicate properly with the NET2272.
2114	 * Verifies connection using writes and reads to write/read and
2115	 * read-only registers.
2116	 *
 2117	 * This routine is strongly recommended, especially during early bring-up
 2118	 * of new hardware; however, for designs that do not apply Power On System
 2119	 * Tests (POST) it may be discarded (or perhaps minimized).
2120	 */
2121	unsigned int ii;
2122	u8 val, refval;
2123
2124	/* Verify NET2272 write/read SCRATCH register can write and read */
2125	refval = net2272_read(dev, SCRATCH);
2126	for (ii = 0; ii < 0x100; ii += 7) {
2127		net2272_write(dev, SCRATCH, ii);
2128		val = net2272_read(dev, SCRATCH);
2129		if (val != ii) {
2130			dev_dbg(dev->dev,
2131				"%s: write/read SCRATCH register test failed: "
2132				"wrote:0x%2.2x, read:0x%2.2x\n",
2133				__func__, ii, val);
2134			return -EINVAL;
2135		}
2136	}
2137	/* To be nice, we write the original SCRATCH value back: */
2138	net2272_write(dev, SCRATCH, refval);
2139
2140	/* Verify NET2272 CHIPREV register is read-only: */
2141	refval = net2272_read(dev, CHIPREV_2272);
2142	for (ii = 0; ii < 0x100; ii += 7) {
2143		net2272_write(dev, CHIPREV_2272, ii);
2144		val = net2272_read(dev, CHIPREV_2272);
2145		if (val != refval) {
2146			dev_dbg(dev->dev,
2147				"%s: write/read CHIPREV register test failed: "
2148				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2149				__func__, ii, val, refval);
2150			return -EINVAL;
2151		}
2152	}
2153
2154	/*
2155	 * Verify NET2272's "NET2270 legacy revision" register
2156	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2157	 *    register should read the same value, regardless of the NET2272
2158	 *    silicon revision.  The legacy register applies to NET2270
2159	 *    firmware being applied to the NET2272.
2160	 */
2161	val = net2272_read(dev, CHIPREV_LEGACY);
2162	if (val != NET2270_LEGACY_REV) {
2163		/*
2164		 * Unexpected legacy revision value
2165		 * - Perhaps the chip is a NET2270?
2166		 */
2167		dev_dbg(dev->dev,
2168			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2169			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2170			__func__, NET2270_LEGACY_REV, val);
2171		return -EINVAL;
2172	}
2173
2174	/*
2175	 * Verify NET2272 silicon revision
2176	 *  - This revision register is appropriate for the silicon version
2177	 *    of the NET2272
2178	 */
2179	val = net2272_read(dev, CHIPREV_2272);
2180	switch (val) {
2181	case CHIPREV_NET2272_R1:
2182		/*
2183		 * NET2272 Rev 1 has DMA related errata:
2184		 *  - Newer silicon (Rev 1A or better) required
2185		 */
2186		dev_dbg(dev->dev,
2187			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2188			__func__);
2189		break;
2190	case CHIPREV_NET2272_R1A:
2191		break;
2192	default:
2193		/* NET2272 silicon version *may* not work with this firmware */
2194		dev_dbg(dev->dev,
2195			"%s: unexpected silicon revision register value: "
 2196			"CHIPREV_2272: 0x%2.2x\n",
2197			__func__, val);
2198		/*
2199		 * Return Success, even though the chip rev is not an expected value
2200		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2201		 *  - Often, new silicon is perfectly compatible
2202		 */
2203	}
2204
2205	/* Success: NET2272 checks out OK */
2206	return 0;
2207}
2208
2209static void
2210net2272_gadget_release(struct device *_dev)
2211{
2212	struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
2213
2214	kfree(dev);
2215}
2216
2217/*---------------------------------------------------------------------------*/
2218
2219static void
2220net2272_remove(struct net2272 *dev)
2221{
2222	if (dev->added)
2223		usb_del_gadget(&dev->gadget);
2224	free_irq(dev->irq, dev);
2225	iounmap(dev->base_addr);
2226	device_remove_file(dev->dev, &dev_attr_registers);
2227
2228	dev_info(dev->dev, "unbind\n");
2229}
2230
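/*
 * Allocation and common init shared by the PCI RDK and platform probe paths;
 * the caller still maps the register window into dev->base_addr and then
 * finishes bring-up with net2272_probe_fin().
 */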
2231static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2232{
2233	struct net2272 *ret;
2234
2235	if (!irq) {
2236		dev_dbg(dev, "No IRQ!\n");
2237		return ERR_PTR(-ENODEV);
2238	}
2239
2240	/* alloc, and start init */
2241	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2242	if (!ret)
2243		return ERR_PTR(-ENOMEM);
2244
2245	spin_lock_init(&ret->lock);
2246	ret->irq = irq;
2247	ret->dev = dev;
2248	ret->gadget.ops = &net2272_ops;
2249	ret->gadget.max_speed = USB_SPEED_HIGH;
2250
2251	/* the "gadget" abstracts/virtualizes the controller */
2252	ret->gadget.name = driver_name;
2253	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
2254
2255	return ret;
2256}
2257
2258static int
2259net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2260{
2261	int ret;
2262
2263	/* See if there... */
2264	if (net2272_present(dev)) {
2265		dev_warn(dev->dev, "2272 not found!\n");
2266		ret = -ENODEV;
2267		goto err;
2268	}
2269
2270	net2272_usb_reset(dev);
2271	net2272_usb_reinit(dev);
2272
2273	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2274	if (ret) {
2275		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2276		goto err;
2277	}
2278
2279	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2280
2281	/* done */
2282	dev_info(dev->dev, "%s\n", driver_desc);
2283	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2284		dev->irq, dev->base_addr, dev->chiprev,
2285		dma_mode_string());
2286	dev_info(dev->dev, "version: %s\n", driver_vers);
2287
2288	ret = device_create_file(dev->dev, &dev_attr_registers);
2289	if (ret)
2290		goto err_irq;
2291
2292	ret = usb_add_gadget(&dev->gadget);
2293	if (ret)
2294		goto err_add_udc;
2295	dev->added = 1;
2296
2297	return 0;
2298
2299err_add_udc:
2300	device_remove_file(dev->dev, &dev_attr_registers);
2301 err_irq:
2302	free_irq(dev->irq, dev);
2303 err:
2304	return ret;
2305}
2306
2307#ifdef CONFIG_USB_PCI
2308
2309/*
2310 * wrap this driver around the specified device, but
2311 * don't respond over USB until a gadget driver binds to us
2312 */
2313
2314static int
2315net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2316{
2317	unsigned long resource, len, tmp;
2318	void __iomem *mem_mapped_addr[4];
2319	int ret, i;
2320
2321	/*
2322	 * BAR 0 holds PLX 9054 config registers
2323	 * BAR 1 is i/o memory; unused here
2324	 * BAR 2 holds EPLD config registers
2325	 * BAR 3 holds NET2272 registers
2326	 */
2327
2328	/* Find and map all address spaces */
2329	for (i = 0; i < 4; ++i) {
2330		if (i == 1)
2331			continue;	/* BAR1 unused */
2332
2333		resource = pci_resource_start(pdev, i);
2334		len = pci_resource_len(pdev, i);
2335
2336		if (!request_mem_region(resource, len, driver_name)) {
2337			dev_dbg(dev->dev, "controller already in use\n");
2338			ret = -EBUSY;
2339			goto err;
2340		}
2341
2342		mem_mapped_addr[i] = ioremap(resource, len);
2343		if (mem_mapped_addr[i] == NULL) {
2344			release_mem_region(resource, len);
2345			dev_dbg(dev->dev, "can't map memory\n");
2346			ret = -EFAULT;
2347			goto err;
2348		}
2349	}
2350
2351	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2352	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2353	dev->base_addr = mem_mapped_addr[3];
2354
2355	/* Set PLX 9054 bus width (16 bits) */
2356	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2357	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2358			dev->rdk1.plx9054_base_addr + LBRD1);
2359
2360	/* Enable PLX 9054 Interrupts */
2361	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2362			(1 << PCI_INTERRUPT_ENABLE) |
2363			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2364			dev->rdk1.plx9054_base_addr + INTCSR);
2365
2366	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2367			dev->rdk1.plx9054_base_addr + DMACSR0);
2368
2369	/* reset */
2370	writeb((1 << EPLD_DMA_ENABLE) |
2371		(1 << DMA_CTL_DACK) |
2372		(1 << DMA_TIMEOUT_ENABLE) |
2373		(1 << USER) |
2374		(0 << MPX_MODE) |
2375		(1 << BUSWIDTH) |
2376		(1 << NET2272_RESET),
2377		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2378
2379	mb();
2380	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2381		~(1 << NET2272_RESET),
2382		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2383	udelay(200);
2384
2385	return 0;
2386
2387 err:
2388	while (--i >= 0) {
2389		if (i == 1)
2390			continue;	/* BAR1 unused */
2391		iounmap(mem_mapped_addr[i]);
2392		release_mem_region(pci_resource_start(pdev, i),
2393			pci_resource_len(pdev, i));
2394	}
2395
2396	return ret;
2397}
2398
2399static int
2400net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2401{
2402	unsigned long resource, len;
2403	void __iomem *mem_mapped_addr[2];
2404	int ret, i;
2405
2406	/*
 2407	 * BAR 0 holds FPGA config registers
2408	 * BAR 1 holds NET2272 registers
2409	 */
2410
2411	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2412	for (i = 0; i < 2; ++i) {
2413		resource = pci_resource_start(pdev, i);
2414		len = pci_resource_len(pdev, i);
2415
2416		if (!request_mem_region(resource, len, driver_name)) {
2417			dev_dbg(dev->dev, "controller already in use\n");
2418			ret = -EBUSY;
2419			goto err;
2420		}
2421
2422		mem_mapped_addr[i] = ioremap(resource, len);
2423		if (mem_mapped_addr[i] == NULL) {
2424			release_mem_region(resource, len);
2425			dev_dbg(dev->dev, "can't map memory\n");
2426			ret = -EFAULT;
2427			goto err;
2428		}
2429	}
2430
2431	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2432	dev->base_addr = mem_mapped_addr[1];
2433
2434	mb();
2435	/* Set 2272 bus width (16 bits) and reset */
2436	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2437	udelay(200);
2438	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2439	/* Print fpga version number */
2440	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2441		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2442	/* Enable FPGA Interrupts */
2443	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2444
2445	return 0;
2446
2447 err:
2448	while (--i >= 0) {
2449		iounmap(mem_mapped_addr[i]);
2450		release_mem_region(pci_resource_start(pdev, i),
2451			pci_resource_len(pdev, i));
2452	}
2453
2454	return ret;
2455}
2456
2457static int
2458net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2459{
2460	struct net2272 *dev;
2461	int ret;
2462
2463	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2464	if (IS_ERR(dev))
2465		return PTR_ERR(dev);
2466	dev->dev_id = pdev->device;
2467
2468	if (pci_enable_device(pdev) < 0) {
2469		ret = -ENODEV;
2470		goto err_put;
2471	}
2472
2473	pci_set_master(pdev);
2474
2475	switch (pdev->device) {
2476	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2477	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2478	default: BUG();
2479	}
2480	if (ret)
2481		goto err_pci;
2482
2483	ret = net2272_probe_fin(dev, 0);
2484	if (ret)
2485		goto err_pci;
2486
2487	pci_set_drvdata(pdev, dev);
2488
2489	return 0;
2490
2491 err_pci:
2492	pci_disable_device(pdev);
2493 err_put:
2494	usb_put_gadget(&dev->gadget);
2495
2496	return ret;
2497}
2498
2499static void
2500net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2501{
2502	int i;
2503
2504	/* disable PLX 9054 interrupts */
2505	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2506		~(1 << PCI_INTERRUPT_ENABLE),
2507		dev->rdk1.plx9054_base_addr + INTCSR);
2508
2509	/* clean up resources allocated during probe() */
2510	iounmap(dev->rdk1.plx9054_base_addr);
2511	iounmap(dev->rdk1.epld_base_addr);
2512
2513	for (i = 0; i < 4; ++i) {
2514		if (i == 1)
2515			continue;	/* BAR1 unused */
2516		release_mem_region(pci_resource_start(pdev, i),
2517			pci_resource_len(pdev, i));
2518	}
2519}
2520
2521static void
2522net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2523{
2524	int i;
2525
2526	/* disable fpga interrupts
2527	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2528			~(1 << PCI_INTERRUPT_ENABLE),
2529			dev->rdk1.plx9054_base_addr + INTCSR);
2530	*/
2531
2532	/* clean up resources allocated during probe() */
2533	iounmap(dev->rdk2.fpga_base_addr);
2534
2535	for (i = 0; i < 2; ++i)
2536		release_mem_region(pci_resource_start(pdev, i),
2537			pci_resource_len(pdev, i));
2538}
2539
2540static void
2541net2272_pci_remove(struct pci_dev *pdev)
2542{
2543	struct net2272 *dev = pci_get_drvdata(pdev);
2544
2545	net2272_remove(dev);
2546
2547	switch (pdev->device) {
2548	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2549	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2550	default: BUG();
2551	}
2552
2553	pci_disable_device(pdev);
2554
2555	usb_put_gadget(&dev->gadget);
2556}
2557
2558/* Table of matching PCI IDs */
2559static struct pci_device_id pci_ids[] = {
2560	{	/* RDK 1 card */
2561		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562		.class_mask  = 0,
2563		.vendor      = PCI_VENDOR_ID_PLX,
2564		.device      = PCI_DEVICE_ID_RDK1,
2565		.subvendor   = PCI_ANY_ID,
2566		.subdevice   = PCI_ANY_ID,
2567	},
2568	{	/* RDK 2 card */
2569		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2570		.class_mask  = 0,
2571		.vendor      = PCI_VENDOR_ID_PLX,
2572		.device      = PCI_DEVICE_ID_RDK2,
2573		.subvendor   = PCI_ANY_ID,
2574		.subdevice   = PCI_ANY_ID,
2575	},
2576	{ }
2577};
2578MODULE_DEVICE_TABLE(pci, pci_ids);
2579
2580static struct pci_driver net2272_pci_driver = {
2581	.name     = driver_name,
2582	.id_table = pci_ids,
2583
2584	.probe    = net2272_pci_probe,
2585	.remove   = net2272_pci_remove,
2586};
2587
2588static int net2272_pci_register(void)
2589{
2590	return pci_register_driver(&net2272_pci_driver);
2591}
2592
2593static void net2272_pci_unregister(void)
2594{
2595	pci_unregister_driver(&net2272_pci_driver);
2596}
2597
2598#else
2599static inline int net2272_pci_register(void) { return 0; }
2600static inline void net2272_pci_unregister(void) { }
2601#endif
2602
2603/*---------------------------------------------------------------------------*/
2604
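/*
 * A minimal sketch of how a board file might hand this driver its memory and
 * IRQ resources.  The base address, region size, and IRQ number below are
 * placeholders, not taken from any real board:
 *
 *	static struct resource board_net2272_resources[] = {
 *		DEFINE_RES_MEM(0x20300000, 0x100),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device board_net2272_device = {
 *		.name		= "net2272",
 *		.id		= -1,
 *		.resource	= board_net2272_resources,
 *		.num_resources	= ARRAY_SIZE(board_net2272_resources),
 *	};
 *
 *	platform_device_register(&board_net2272_device);
 */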
2605static int
2606net2272_plat_probe(struct platform_device *pdev)
2607{
2608	struct net2272 *dev;
2609	int ret;
2610	unsigned int irqflags;
2611	resource_size_t base, len;
2612	struct resource *iomem, *iomem_bus, *irq_res;
2613
2614	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2615	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2617	if (!irq_res || !iomem) {
 2618		dev_err(&pdev->dev, "must provide irq/base addr\n");
2619		return -EINVAL;
2620	}
2621
2622	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2623	if (IS_ERR(dev))
2624		return PTR_ERR(dev);
2625
2626	irqflags = 0;
2627	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2628		irqflags |= IRQF_TRIGGER_RISING;
2629	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2630		irqflags |= IRQF_TRIGGER_FALLING;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2632		irqflags |= IRQF_TRIGGER_HIGH;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2634		irqflags |= IRQF_TRIGGER_LOW;
2635
2636	base = iomem->start;
2637	len = resource_size(iomem);
2638	if (iomem_bus)
2639		dev->base_shift = iomem_bus->start;
2640
2641	if (!request_mem_region(base, len, driver_name)) {
 2642		dev_dbg(dev->dev, "can't request memory region\n");
2643		ret = -EBUSY;
2644		goto err;
2645	}
2646	dev->base_addr = ioremap(base, len);
2647	if (!dev->base_addr) {
2648		dev_dbg(dev->dev, "can't map memory\n");
2649		ret = -EFAULT;
2650		goto err_req;
2651	}
2652
2653	ret = net2272_probe_fin(dev, irqflags);
2654	if (ret)
2655		goto err_io;
2656
2657	platform_set_drvdata(pdev, dev);
2658	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2659		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2660
2661	return 0;
2662
2663 err_io:
2664	iounmap(dev->base_addr);
2665 err_req:
2666	release_mem_region(base, len);
2667 err:
2668	usb_put_gadget(&dev->gadget);
2669
2670	return ret;
2671}
2672
2673static void
2674net2272_plat_remove(struct platform_device *pdev)
2675{
2676	struct net2272 *dev = platform_get_drvdata(pdev);
2677
2678	net2272_remove(dev);
2679
2680	release_mem_region(pdev->resource[0].start,
2681		resource_size(&pdev->resource[0]));
2682
2683	usb_put_gadget(&dev->gadget);
2684}
2685
2686static struct platform_driver net2272_plat_driver = {
2687	.probe   = net2272_plat_probe,
 2688	.remove  = net2272_plat_remove,
2689	.driver  = {
2690		.name  = driver_name,
2691	},
2692	/* FIXME .suspend, .resume */
2693};
2694MODULE_ALIAS("platform:net2272");
2695
2696static int __init net2272_init(void)
2697{
2698	int ret;
2699
2700	ret = net2272_pci_register();
2701	if (ret)
2702		return ret;
2703	ret = platform_driver_register(&net2272_plat_driver);
2704	if (ret)
2705		goto err_pci;
2706	return ret;
2707
2708err_pci:
2709	net2272_pci_unregister();
2710	return ret;
2711}
2712module_init(net2272_init);
2713
2714static void __exit net2272_cleanup(void)
2715{
2716	net2272_pci_unregister();
2717	platform_driver_unregister(&net2272_plat_driver);
2718}
2719module_exit(net2272_cleanup);
2720
2721MODULE_DESCRIPTION(DRIVER_DESC);
2722MODULE_AUTHOR("PLX Technology, Inc.");
2723MODULE_LICENSE("GPL");