   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Driver for PLX NET2272 USB device controller
   4 *
   5 * Copyright (C) 2005-2006 PLX Technology, Inc.
   6 * Copyright (C) 2006-2011 Analog Devices, Inc.
   7 */
   8
   9#include <linux/delay.h>
  10#include <linux/device.h>
  11#include <linux/errno.h>
  12#include <linux/init.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/ioport.h>
  16#include <linux/kernel.h>
  17#include <linux/list.h>
  18#include <linux/module.h>
  19#include <linux/moduleparam.h>
  20#include <linux/pci.h>
  21#include <linux/platform_device.h>
  22#include <linux/prefetch.h>
  23#include <linux/sched.h>
  24#include <linux/slab.h>
  25#include <linux/timer.h>
  26#include <linux/usb.h>
  27#include <linux/usb/ch9.h>
  28#include <linux/usb/gadget.h>
  29
  30#include <asm/byteorder.h>
  31#include <asm/unaligned.h>
  32
  33#include "net2272.h"
  34
  35#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
  36
  37static const char driver_name[] = "net2272";
  38static const char driver_vers[] = "2006 October 17/mainline";
  39static const char driver_desc[] = DRIVER_DESC;
  40
  41static const char ep0name[] = "ep0";
  42static const char * const ep_name[] = {
  43	ep0name,
  44	"ep-a", "ep-b", "ep-c",
  45};
  46
  47#ifdef CONFIG_USB_NET2272_DMA
  48/*
  49 * use_dma: the NET2272 can use an external DMA controller.
  50 * Note that since there is no generic DMA api, some functions,
  51 * notably request_dma, start_dma, and cancel_dma will need to be
  52 * modified for your platform's particular dma controller.
  53 *
  54 * If use_dma is disabled, pio will be used instead.
  55 */
  56static bool use_dma = false;
  57module_param(use_dma, bool, 0644);
  58
  59/*
  60 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
  61 * The NET2272 can only use dma for a single endpoint at a time.
  62 * At some point this could be modified to allow either endpoint
  63 * to take control of dma as it becomes available.
  64 *
  65 * Note that DMA should not be used on OUT endpoints unless it can
  66 * be guaranteed that no short packets will arrive on an IN endpoint
  67 * while the DMA operation is pending.  Otherwise the OUT DMA will
  68 * terminate prematurely (See NET2272 Errata 630-0213-0101)
  69 */
  70static ushort dma_ep = 1;
  71module_param(dma_ep, ushort, 0644);
  72
  73/*
  74 * dma_mode: net2272 dma mode setting (see LOCCTL1 definition):
  75 *	mode 0 == Slow DREQ mode
  76 *	mode 1 == Fast DREQ mode
  77 *	mode 2 == Burst mode
  78 */
  79static ushort dma_mode = 2;
  80module_param(dma_mode, ushort, 0644);
  81#else
  82#define use_dma 0
  83#define dma_ep 1
  84#define dma_mode 2
  85#endif
  86
  87/*
  88 * fifo_mode: net2272 buffer configuration:
  89 *      mode 0 == ep-{a,b,c} 512db each
  90 *      mode 1 == ep-a 1k, ep-{b,c} 512db
  91 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
  92 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
  93 */
  94static ushort fifo_mode;
  95module_param(fifo_mode, ushort, 0644);
  96
  97/*
  98 * enable_suspend: When enabled, the driver will respond to
  99 * USB suspend requests by powering down the NET2272.  Otherwise,
 100 * USB suspend requests will be ignored.  This is acceptable for
  101 * self-powered devices.  For bus-powered devices, set this to 1.
 102 */
 103static ushort enable_suspend;
 104module_param(enable_suspend, ushort, 0644);
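/*
 * A minimal usage sketch (assumptions: the driver is built as a module named
 * net2272.ko, and CONFIG_USB_NET2272_DMA is enabled so the dma parameters
 * above exist):
 *
 *	modprobe net2272 use_dma=1 dma_ep=1 dma_mode=2 fifo_mode=2
 *
 * Since the parameters are registered with mode 0644, they can also be
 * inspected or changed at runtime under /sys/module/net2272/parameters/.
 */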
 105
 106static void assert_out_naking(struct net2272_ep *ep, const char *where)
 107{
 108	u8 tmp;
 109
 110#ifndef DEBUG
 111	return;
 112#endif
 113
 114	tmp = net2272_ep_read(ep, EP_STAT0);
 115	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 116		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 117			ep->ep.name, where, tmp);
 118		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 119	}
 120}
 121#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 122
 123static void stop_out_naking(struct net2272_ep *ep)
 124{
 125	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 126
 127	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 128		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 129}
 130
 131#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 132
 133static char *type_string(u8 bmAttributes)
 134{
 135	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 136	case USB_ENDPOINT_XFER_BULK: return "bulk";
 137	case USB_ENDPOINT_XFER_ISOC: return "iso";
 138	case USB_ENDPOINT_XFER_INT:  return "intr";
 139	default:                     return "control";
 140	}
 141}
 142
 143static char *buf_state_string(unsigned state)
 144{
 145	switch (state) {
 146	case BUFF_FREE:  return "free";
 147	case BUFF_VALID: return "valid";
 148	case BUFF_LCL:   return "local";
 149	case BUFF_USB:   return "usb";
 150	default:         return "unknown";
 151	}
 152}
 153
 154static char *dma_mode_string(void)
 155{
 156	if (!use_dma)
 157		return "PIO";
 158	switch (dma_mode) {
 159	case 0:  return "SLOW DREQ";
 160	case 1:  return "FAST DREQ";
 161	case 2:  return "BURST";
 162	default: return "invalid";
 163	}
 164}
 165
 166static void net2272_dequeue_all(struct net2272_ep *);
 167static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 168static int net2272_fifo_status(struct usb_ep *);
 169
 170static const struct usb_ep_ops net2272_ep_ops;
 171
 172/*---------------------------------------------------------------------------*/
 173
 174static int
 175net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 176{
 177	struct net2272 *dev;
 178	struct net2272_ep *ep;
 179	u32 max;
 180	u8 tmp;
 181	unsigned long flags;
 182
 183	ep = container_of(_ep, struct net2272_ep, ep);
 184	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 185			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 186		return -EINVAL;
 187	dev = ep->dev;
 188	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 189		return -ESHUTDOWN;
 190
 191	max = usb_endpoint_maxp(desc);
 192
 193	spin_lock_irqsave(&dev->lock, flags);
 194	_ep->maxpacket = max;
 195	ep->desc = desc;
 196
 197	/* net2272_ep_reset() has already been called */
 198	ep->stopped = 0;
 199	ep->wedged = 0;
 200
 201	/* set speed-dependent max packet */
 202	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 203	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 204
 205	/* set type, direction, address; reset fifo counters */
 206	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 207	tmp = usb_endpoint_type(desc);
 208	if (usb_endpoint_xfer_bulk(desc)) {
 209		/* catch some particularly blatant driver bugs */
 210		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 211		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 212			spin_unlock_irqrestore(&dev->lock, flags);
 213			return -ERANGE;
 214		}
 215	}
 216	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 217	tmp <<= ENDPOINT_TYPE;
 218	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 219	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 220	tmp |= (1 << ENDPOINT_ENABLE);
 221
 222	/* for OUT transfers, block the rx fifo until a read is posted */
 223	ep->is_in = usb_endpoint_dir_in(desc);
 224	if (!ep->is_in)
 225		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 226
 227	net2272_ep_write(ep, EP_CFG, tmp);
 228
 229	/* enable irqs */
 230	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 231	net2272_write(dev, IRQENB0, tmp);
 232
 233	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 234		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 235		| net2272_ep_read(ep, EP_IRQENB);
 236	net2272_ep_write(ep, EP_IRQENB, tmp);
 237
 238	tmp = desc->bEndpointAddress;
 239	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 240		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 241		type_string(desc->bmAttributes), max,
 242		net2272_ep_read(ep, EP_CFG));
 243
 244	spin_unlock_irqrestore(&dev->lock, flags);
 245	return 0;
 246}
 247
 248static void net2272_ep_reset(struct net2272_ep *ep)
 249{
 250	u8 tmp;
 251
 252	ep->desc = NULL;
 253	INIT_LIST_HEAD(&ep->queue);
 254
 255	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 256	ep->ep.ops = &net2272_ep_ops;
 257
 258	/* disable irqs, endpoint */
 259	net2272_ep_write(ep, EP_IRQENB, 0);
 260
 261	/* init to our chosen defaults, notably so that we NAK OUT
 262	 * packets until the driver queues a read.
 263	 */
 264	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 265	net2272_ep_write(ep, EP_RSPSET, tmp);
 266
 267	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 268	if (ep->num != 0)
 269		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 270
 271	net2272_ep_write(ep, EP_RSPCLR, tmp);
 272
 273	/* scrub most status bits, and flush any fifo state */
 274	net2272_ep_write(ep, EP_STAT0,
 275			  (1 << DATA_IN_TOKEN_INTERRUPT)
 276			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 277			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 278			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 279			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 280
 281	net2272_ep_write(ep, EP_STAT1,
 282			    (1 << TIMEOUT)
 283			  | (1 << USB_OUT_ACK_SENT)
 284			  | (1 << USB_OUT_NAK_SENT)
 285			  | (1 << USB_IN_ACK_RCVD)
 286			  | (1 << USB_IN_NAK_SENT)
 287			  | (1 << USB_STALL_SENT)
 288			  | (1 << LOCAL_OUT_ZLP)
 289			  | (1 << BUFFER_FLUSH));
 290
 291	/* fifo size is handled separately */
 292}
 293
 294static int net2272_disable(struct usb_ep *_ep)
 295{
 296	struct net2272_ep *ep;
 297	unsigned long flags;
 298
 299	ep = container_of(_ep, struct net2272_ep, ep);
 300	if (!_ep || !ep->desc || _ep->name == ep0name)
 301		return -EINVAL;
 302
 303	spin_lock_irqsave(&ep->dev->lock, flags);
 304	net2272_dequeue_all(ep);
 305	net2272_ep_reset(ep);
 306
 307	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 308
 309	spin_unlock_irqrestore(&ep->dev->lock, flags);
 310	return 0;
 311}
 312
 313/*---------------------------------------------------------------------------*/
 314
 315static struct usb_request *
 316net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 317{
 318	struct net2272_request *req;
 319
 320	if (!_ep)
 321		return NULL;
 322
 323	req = kzalloc(sizeof(*req), gfp_flags);
 324	if (!req)
 325		return NULL;
 326
 327	INIT_LIST_HEAD(&req->queue);
 328
 329	return &req->req;
 330}
 331
 332static void
 333net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 334{
 335	struct net2272_request *req;
 336
 337	if (!_ep || !_req)
 338		return;
 339
 340	req = container_of(_req, struct net2272_request, req);
 341	WARN_ON(!list_empty(&req->queue));
 342	kfree(req);
 343}
 344
 345static void
 346net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 347{
 348	struct net2272 *dev;
 349	unsigned stopped = ep->stopped;
 350
 351	if (ep->num == 0) {
 352		if (ep->dev->protocol_stall) {
 353			ep->stopped = 1;
 354			set_halt(ep);
 355		}
 356		allow_status(ep);
 357	}
 358
 359	list_del_init(&req->queue);
 360
 361	if (req->req.status == -EINPROGRESS)
 362		req->req.status = status;
 363	else
 364		status = req->req.status;
 365
 366	dev = ep->dev;
 367	if (use_dma && ep->dma)
 368		usb_gadget_unmap_request(&dev->gadget, &req->req,
 369				ep->is_in);
 370
 371	if (status && status != -ESHUTDOWN)
 372		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 373			ep->ep.name, &req->req, status,
 374			req->req.actual, req->req.length, req->req.buf);
 375
 376	/* don't modify queue heads during completion callback */
 377	ep->stopped = 1;
 378	spin_unlock(&dev->lock);
 379	usb_gadget_giveback_request(&ep->ep, &req->req);
 380	spin_lock(&dev->lock);
 381	ep->stopped = stopped;
 382}
 383
 384static int
 385net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 386	struct net2272_request *req, unsigned max)
 387{
 388	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 389	u16 *bufp;
 390	unsigned length, count;
 391	u8 tmp;
 392
 393	length = min(req->req.length - req->req.actual, max);
 394	req->req.actual += length;
 395
 396	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 397		ep->ep.name, req, max, length,
 398		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 399
 400	count = length;
 401	bufp = (u16 *)buf;
 402
 403	while (likely(count >= 2)) {
 404		/* no byte-swap required; chip endian set during init */
 405		writew(*bufp++, ep_data);
 406		count -= 2;
 407	}
 408	buf = (u8 *)bufp;
 409
 410	/* write final byte by placing the NET2272 into 8-bit mode */
 411	if (unlikely(count)) {
 412		tmp = net2272_read(ep->dev, LOCCTL);
 413		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 414		writeb(*buf, ep_data);
 415		net2272_write(ep->dev, LOCCTL, tmp);
 416	}
 417	return length;
 418}
 419
 420/* returns: 0: still running, 1: completed, negative: errno */
 421static int
 422net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 423{
 424	u8 *buf;
 425	unsigned count, max;
 426	int status;
 427
 428	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 429		ep->ep.name, req->req.actual, req->req.length);
 430
 431	/*
 432	 * Keep loading the endpoint until the final packet is loaded,
 433	 * or the endpoint buffer is full.
 434	 */
 435 top:
 436	/*
 437	 * Clear interrupt status
 438	 *  - Packet Transmitted interrupt will become set again when the
 439	 *    host successfully takes another packet
 440	 */
 441	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 442	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 443		buf = req->req.buf + req->req.actual;
 444		prefetch(buf);
 445
 446		/* force pagesel */
 447		net2272_ep_read(ep, EP_STAT0);
 448
 449		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 450			(net2272_ep_read(ep, EP_AVAIL0));
 451
 452		if (max < ep->ep.maxpacket)
 453			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 454				| (net2272_ep_read(ep, EP_AVAIL0));
 455
 456		count = net2272_write_packet(ep, buf, req, max);
 457		/* see if we are done */
 458		if (req->req.length == req->req.actual) {
 459			/* validate short or zlp packet */
 460			if (count < ep->ep.maxpacket)
 461				set_fifo_bytecount(ep, 0);
 462			net2272_done(ep, req, 0);
 463
 464			if (!list_empty(&ep->queue)) {
 465				req = list_entry(ep->queue.next,
 466						struct net2272_request,
 467						queue);
 468				status = net2272_kick_dma(ep, req);
 469
 470				if (status < 0)
 471					if ((net2272_ep_read(ep, EP_STAT0)
 472							& (1 << BUFFER_EMPTY)))
 473						goto top;
 474			}
 475			return 1;
 476		}
 477		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 478	}
 479	return 0;
 480}
 481
 482static void
 483net2272_out_flush(struct net2272_ep *ep)
 484{
 485	ASSERT_OUT_NAKING(ep);
 486
 487	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 488			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 489	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 490}
 491
 492static int
 493net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 494	struct net2272_request *req, unsigned avail)
 495{
 496	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 497	unsigned is_short;
 498	u16 *bufp;
 499
 500	req->req.actual += avail;
 501
 502	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 503		ep->ep.name, req, avail,
 504		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 505
 506	is_short = (avail < ep->ep.maxpacket);
 507
 508	if (unlikely(avail == 0)) {
 509		/* remove any zlp from the buffer */
 510		(void)readw(ep_data);
 511		return is_short;
 512	}
 513
 514	/* Ensure we get the final byte */
 515	if (unlikely(avail % 2))
 516		avail++;
 517	bufp = (u16 *)buf;
 518
 519	do {
 520		*bufp++ = readw(ep_data);
 521		avail -= 2;
 522	} while (avail);
 523
 524	/*
  525	 * To avoid a false endpoint-available race condition, we must read
  526	 * EP_STAT0 twice in the case of a short transfer.
 527	 */
 528	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 529		net2272_ep_read(ep, EP_STAT0);
 530
 531	return is_short;
 532}
 533
 534static int
 535net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 536{
 537	u8 *buf;
 538	unsigned is_short;
 539	int count;
 540	int tmp;
 541	int cleanup = 0;
 542
 543	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 544		ep->ep.name, req->req.actual, req->req.length);
 545
 546 top:
 547	do {
 548		buf = req->req.buf + req->req.actual;
 549		prefetchw(buf);
 550
 551		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 552			| net2272_ep_read(ep, EP_AVAIL0);
 553
 554		net2272_ep_write(ep, EP_STAT0,
 555			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 556			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 557
 558		tmp = req->req.length - req->req.actual;
 559
 560		if (count > tmp) {
 561			if ((tmp % ep->ep.maxpacket) != 0) {
 562				dev_err(ep->dev->dev,
 563					"%s out fifo %d bytes, expected %d\n",
 564					ep->ep.name, count, tmp);
 565				cleanup = 1;
 566			}
 567			count = (tmp > 0) ? tmp : 0;
 568		}
 569
 570		is_short = net2272_read_packet(ep, buf, req, count);
 571
 572		/* completion */
 573		if (unlikely(cleanup || is_short ||
 574				req->req.actual == req->req.length)) {
 575
 576			if (cleanup) {
 577				net2272_out_flush(ep);
 578				net2272_done(ep, req, -EOVERFLOW);
 579			} else
 580				net2272_done(ep, req, 0);
 581
 582			/* re-initialize endpoint transfer registers
 583			 * otherwise they may result in erroneous pre-validation
 584			 * for subsequent control reads
 585			 */
 586			if (unlikely(ep->num == 0)) {
 587				net2272_ep_write(ep, EP_TRANSFER2, 0);
 588				net2272_ep_write(ep, EP_TRANSFER1, 0);
 589				net2272_ep_write(ep, EP_TRANSFER0, 0);
 590			}
 591
 592			if (!list_empty(&ep->queue)) {
 593				int status;
 594
 595				req = list_entry(ep->queue.next,
 596					struct net2272_request, queue);
 597				status = net2272_kick_dma(ep, req);
 598				if ((status < 0) &&
 599				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 600					goto top;
 601			}
 602			return 1;
 603		}
 604	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 605
 606	return 0;
 607}
 608
 609static void
 610net2272_pio_advance(struct net2272_ep *ep)
 611{
 612	struct net2272_request *req;
 613
 614	if (unlikely(list_empty(&ep->queue)))
 615		return;
 616
 617	req = list_entry(ep->queue.next, struct net2272_request, queue);
 618	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 619}
 620
 621/* returns 0 on success, else negative errno */
 622static int
 623net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 624	unsigned len, unsigned dir)
 625{
 626	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 627		ep, buf, len, dir);
 628
 629	/* The NET2272 only supports a single dma channel */
 630	if (dev->dma_busy)
 631		return -EBUSY;
 632	/*
 633	 * EP_TRANSFER (used to determine the number of bytes received
 634	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 635	 */
 636	if ((dir == 1) && (len > 0x1000000))
 637		return -EINVAL;
 638
 639	dev->dma_busy = 1;
 640
 641	/* initialize platform's dma */
 642#ifdef CONFIG_USB_PCI
 643	/* NET2272 addr, buffer addr, length, etc. */
 644	switch (dev->dev_id) {
 645	case PCI_DEVICE_ID_RDK1:
 646		/* Setup PLX 9054 DMA mode */
 647		writel((1 << LOCAL_BUS_WIDTH) |
 648			(1 << TA_READY_INPUT_ENABLE) |
 649			(0 << LOCAL_BURST_ENABLE) |
 650			(1 << DONE_INTERRUPT_ENABLE) |
 651			(1 << LOCAL_ADDRESSING_MODE) |
 652			(1 << DEMAND_MODE) |
 653			(1 << DMA_EOT_ENABLE) |
 654			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 655			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 656			dev->rdk1.plx9054_base_addr + DMAMODE0);
 657
 658		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 659		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 660		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 661		writel((dir << DIRECTION_OF_TRANSFER) |
 662			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 663			dev->rdk1.plx9054_base_addr + DMADPR0);
 664		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 665			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 666			dev->rdk1.plx9054_base_addr + INTCSR);
 667
 668		break;
 669	}
 670#endif
 671
 672	net2272_write(dev, DMAREQ,
 673		(0 << DMA_BUFFER_VALID) |
 674		(1 << DMA_REQUEST_ENABLE) |
 675		(1 << DMA_CONTROL_DACK) |
 676		(dev->dma_eot_polarity << EOT_POLARITY) |
 677		(dev->dma_dack_polarity << DACK_POLARITY) |
 678		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 679		((ep >> 1) << DMA_ENDPOINT_SELECT));
 680
 681	(void) net2272_read(dev, SCRATCH);
 682
 683	return 0;
 684}
 685
 686static void
 687net2272_start_dma(struct net2272 *dev)
 688{
 689	/* start platform's dma controller */
 690#ifdef CONFIG_USB_PCI
 691	switch (dev->dev_id) {
 692	case PCI_DEVICE_ID_RDK1:
 693		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 694			dev->rdk1.plx9054_base_addr + DMACSR0);
 695		break;
 696	}
 697#endif
 698}
 699
 700/* returns 0 on success, else negative errno */
 701static int
 702net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 703{
 704	unsigned size;
 705	u8 tmp;
 706
 707	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 708		return -EINVAL;
 709
  710	/* don't use dma for odd-length transfers;
 711	 * otherwise, we'd need to deal with the last byte with pio
 712	 */
 713	if (req->req.length & 1)
 714		return -EINVAL;
 715
 716	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 717		ep->ep.name, req, (unsigned long long) req->req.dma);
 718
 719	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 720
 721	/* The NET2272 can only use DMA on one endpoint at a time */
 722	if (ep->dev->dma_busy)
 723		return -EBUSY;
 724
 725	/* Make sure we only DMA an even number of bytes (we'll use
 726	 * pio to complete the transfer)
 727	 */
 728	size = req->req.length;
 729	size &= ~1;
 730
 731	/* device-to-host transfer */
 732	if (ep->is_in) {
 733		/* initialize platform's dma controller */
 734		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 735			/* unable to obtain DMA channel; return error and use pio mode */
 736			return -EBUSY;
 737		req->req.actual += size;
 738
 739	/* host-to-device transfer */
 740	} else {
 741		tmp = net2272_ep_read(ep, EP_STAT0);
 742
 743		/* initialize platform's dma controller */
 744		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 745			/* unable to obtain DMA channel; return error and use pio mode */
 746			return -EBUSY;
 747
 748		if (!(tmp & (1 << BUFFER_EMPTY)))
 749			ep->not_empty = 1;
 750		else
 751			ep->not_empty = 0;
 752
 753
 754		/* allow the endpoint's buffer to fill */
 755		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 756
  757		/* this transfer completed and data's already in the fifo;
  758		 * return an error so pio gets used.
 759		 */
 760		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 761
 762			/* deassert dreq */
 763			net2272_write(ep->dev, DMAREQ,
 764				(0 << DMA_BUFFER_VALID) |
 765				(0 << DMA_REQUEST_ENABLE) |
 766				(1 << DMA_CONTROL_DACK) |
 767				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 768				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 769				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 770				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 771
 772			return -EBUSY;
 773		}
 774	}
 775
 776	/* Don't use per-packet interrupts: use dma interrupts only */
 777	net2272_ep_write(ep, EP_IRQENB, 0);
 778
 779	net2272_start_dma(ep->dev);
 780
 781	return 0;
 782}
 783
 784static void net2272_cancel_dma(struct net2272 *dev)
 785{
 786#ifdef CONFIG_USB_PCI
 787	switch (dev->dev_id) {
 788	case PCI_DEVICE_ID_RDK1:
 789		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 790		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 791		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 792		         (1 << CHANNEL_DONE)))
  793			continue;	/* wait for dma to stabilize */
 794
 795		/* dma abort generates an interrupt */
 796		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 797			dev->rdk1.plx9054_base_addr + DMACSR0);
 798		break;
 799	}
 800#endif
 801
 802	dev->dma_busy = 0;
 803}
 804
 805/*---------------------------------------------------------------------------*/
 806
 807static int
 808net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 809{
 810	struct net2272_request *req;
 811	struct net2272_ep *ep;
 812	struct net2272 *dev;
 813	unsigned long flags;
 814	int status = -1;
 815	u8 s;
 816
 817	req = container_of(_req, struct net2272_request, req);
 818	if (!_req || !_req->complete || !_req->buf
 819			|| !list_empty(&req->queue))
 820		return -EINVAL;
 821	ep = container_of(_ep, struct net2272_ep, ep);
 822	if (!_ep || (!ep->desc && ep->num != 0))
 823		return -EINVAL;
 824	dev = ep->dev;
 825	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 826		return -ESHUTDOWN;
 827
 828	/* set up dma mapping in case the caller didn't */
 829	if (use_dma && ep->dma) {
 830		status = usb_gadget_map_request(&dev->gadget, _req,
 831				ep->is_in);
 832		if (status)
 833			return status;
 834	}
 835
 836	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 837		_ep->name, _req, _req->length, _req->buf,
 838		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 839
 840	spin_lock_irqsave(&dev->lock, flags);
 841
 842	_req->status = -EINPROGRESS;
 843	_req->actual = 0;
 844
 845	/* kickstart this i/o queue? */
 846	if (list_empty(&ep->queue) && !ep->stopped) {
 847		/* maybe there's no control data, just status ack */
 848		if (ep->num == 0 && _req->length == 0) {
 849			net2272_done(ep, req, 0);
 850			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 851			goto done;
 852		}
 853
 854		/* Return zlp, don't let it block subsequent packets */
 855		s = net2272_ep_read(ep, EP_STAT0);
 856		if (s & (1 << BUFFER_EMPTY)) {
  857			/* Buffer is empty; check for a blocking zlp and handle it */
 858			if ((s & (1 << NAK_OUT_PACKETS)) &&
 859			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 860				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 861				/*
 862				 * Request is going to terminate with a short packet ...
 863				 * hope the client is ready for it!
 864				 */
 865				status = net2272_read_fifo(ep, req);
 866				/* clear short packet naking */
 867				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 868				goto done;
 869			}
 870		}
 871
 872		/* try dma first */
 873		status = net2272_kick_dma(ep, req);
 874
 875		if (status < 0) {
 876			/* dma failed (most likely in use by another endpoint)
  877		 * fall back to pio
 878			 */
 879			status = 0;
 880
 881			if (ep->is_in)
 882				status = net2272_write_fifo(ep, req);
 883			else {
 884				s = net2272_ep_read(ep, EP_STAT0);
 885				if ((s & (1 << BUFFER_EMPTY)) == 0)
 886					status = net2272_read_fifo(ep, req);
 887			}
 888
 889			if (unlikely(status != 0)) {
 890				if (status > 0)
 891					status = 0;
 892				req = NULL;
 893			}
 894		}
 895	}
 896	if (likely(req))
 897		list_add_tail(&req->queue, &ep->queue);
 898
 899	if (likely(!list_empty(&ep->queue)))
 900		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 901 done:
 902	spin_unlock_irqrestore(&dev->lock, flags);
 903
 904	return 0;
 905}
 906
 907/* dequeue ALL requests */
 908static void
 909net2272_dequeue_all(struct net2272_ep *ep)
 910{
 911	struct net2272_request *req;
 912
 913	/* called with spinlock held */
 914	ep->stopped = 1;
 915
 916	while (!list_empty(&ep->queue)) {
 917		req = list_entry(ep->queue.next,
 918				struct net2272_request,
 919				queue);
 920		net2272_done(ep, req, -ESHUTDOWN);
 921	}
 922}
 923
 924/* dequeue JUST ONE request */
 925static int
 926net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 927{
 928	struct net2272_ep *ep;
 929	struct net2272_request *req = NULL, *iter;
 930	unsigned long flags;
 931	int stopped;
 932
 933	ep = container_of(_ep, struct net2272_ep, ep);
 934	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 935		return -EINVAL;
 936
 937	spin_lock_irqsave(&ep->dev->lock, flags);
 938	stopped = ep->stopped;
 939	ep->stopped = 1;
 940
 941	/* make sure it's still queued on this endpoint */
 942	list_for_each_entry(iter, &ep->queue, queue) {
 943		if (&iter->req != _req)
 944			continue;
 945		req = iter;
 946		break;
 947	}
 948	if (!req) {
 949		ep->stopped = stopped;
 950		spin_unlock_irqrestore(&ep->dev->lock, flags);
 951		return -EINVAL;
 952	}
 953
 954	/* queue head may be partially complete */
 955	if (ep->queue.next == &req->queue) {
 956		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 957		net2272_done(ep, req, -ECONNRESET);
 958	}
 959	ep->stopped = stopped;
 960
 961	spin_unlock_irqrestore(&ep->dev->lock, flags);
 962	return 0;
 963}
 964
 965/*---------------------------------------------------------------------------*/
 966
 967static int
 968net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 969{
 970	struct net2272_ep *ep;
 971	unsigned long flags;
 972	int ret = 0;
 973
 974	ep = container_of(_ep, struct net2272_ep, ep);
 975	if (!_ep || (!ep->desc && ep->num != 0))
 976		return -EINVAL;
 977	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 978		return -ESHUTDOWN;
 979	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 980		return -EINVAL;
 981
 982	spin_lock_irqsave(&ep->dev->lock, flags);
 983	if (!list_empty(&ep->queue))
 984		ret = -EAGAIN;
 985	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
 986		ret = -EAGAIN;
 987	else {
 988		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
 989			value ? "set" : "clear",
 990			wedged ? "wedge" : "halt");
 991		/* set/clear */
 992		if (value) {
 993			if (ep->num == 0)
 994				ep->dev->protocol_stall = 1;
 995			else
 996				set_halt(ep);
 997			if (wedged)
 998				ep->wedged = 1;
 999		} else {
1000			clear_halt(ep);
1001			ep->wedged = 0;
1002		}
1003	}
1004	spin_unlock_irqrestore(&ep->dev->lock, flags);
1005
1006	return ret;
1007}
1008
1009static int
1010net2272_set_halt(struct usb_ep *_ep, int value)
1011{
1012	return net2272_set_halt_and_wedge(_ep, value, 0);
1013}
1014
1015static int
1016net2272_set_wedge(struct usb_ep *_ep)
1017{
1018	if (!_ep || _ep->name == ep0name)
1019		return -EINVAL;
1020	return net2272_set_halt_and_wedge(_ep, 1, 1);
1021}
1022
1023static int
1024net2272_fifo_status(struct usb_ep *_ep)
1025{
1026	struct net2272_ep *ep;
1027	u16 avail;
1028
1029	ep = container_of(_ep, struct net2272_ep, ep);
1030	if (!_ep || (!ep->desc && ep->num != 0))
1031		return -ENODEV;
1032	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1033		return -ESHUTDOWN;
1034
1035	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1036	avail |= net2272_ep_read(ep, EP_AVAIL0);
1037	if (avail > ep->fifo_size)
1038		return -EOVERFLOW;
1039	if (ep->is_in)
1040		avail = ep->fifo_size - avail;
1041	return avail;
1042}
1043
1044static void
1045net2272_fifo_flush(struct usb_ep *_ep)
1046{
1047	struct net2272_ep *ep;
1048
1049	ep = container_of(_ep, struct net2272_ep, ep);
1050	if (!_ep || (!ep->desc && ep->num != 0))
1051		return;
1052	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1053		return;
1054
1055	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1056}
1057
1058static const struct usb_ep_ops net2272_ep_ops = {
1059	.enable        = net2272_enable,
1060	.disable       = net2272_disable,
1061
1062	.alloc_request = net2272_alloc_request,
1063	.free_request  = net2272_free_request,
1064
1065	.queue         = net2272_queue,
1066	.dequeue       = net2272_dequeue,
1067
1068	.set_halt      = net2272_set_halt,
1069	.set_wedge     = net2272_set_wedge,
1070	.fifo_status   = net2272_fifo_status,
1071	.fifo_flush    = net2272_fifo_flush,
1072};
1073
1074/*---------------------------------------------------------------------------*/
1075
1076static int
1077net2272_get_frame(struct usb_gadget *_gadget)
1078{
1079	struct net2272 *dev;
1080	unsigned long flags;
1081	u16 ret;
1082
1083	if (!_gadget)
1084		return -ENODEV;
1085	dev = container_of(_gadget, struct net2272, gadget);
1086	spin_lock_irqsave(&dev->lock, flags);
1087
1088	ret = net2272_read(dev, FRAME1) << 8;
1089	ret |= net2272_read(dev, FRAME0);
1090
1091	spin_unlock_irqrestore(&dev->lock, flags);
1092	return ret;
1093}
1094
1095static int
1096net2272_wakeup(struct usb_gadget *_gadget)
1097{
1098	struct net2272 *dev;
1099	u8 tmp;
1100	unsigned long flags;
1101
1102	if (!_gadget)
1103		return 0;
1104	dev = container_of(_gadget, struct net2272, gadget);
1105
1106	spin_lock_irqsave(&dev->lock, flags);
1107	tmp = net2272_read(dev, USBCTL0);
1108	if (tmp & (1 << IO_WAKEUP_ENABLE))
1109		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1110
1111	spin_unlock_irqrestore(&dev->lock, flags);
1112
1113	return 0;
1114}
1115
1116static int
1117net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1118{
1119	if (!_gadget)
1120		return -ENODEV;
1121
1122	_gadget->is_selfpowered = (value != 0);
1123
1124	return 0;
1125}
1126
1127static int
1128net2272_pullup(struct usb_gadget *_gadget, int is_on)
1129{
1130	struct net2272 *dev;
1131	u8 tmp;
1132	unsigned long flags;
1133
1134	if (!_gadget)
1135		return -ENODEV;
1136	dev = container_of(_gadget, struct net2272, gadget);
1137
1138	spin_lock_irqsave(&dev->lock, flags);
1139	tmp = net2272_read(dev, USBCTL0);
1140	dev->softconnect = (is_on != 0);
1141	if (is_on)
1142		tmp |= (1 << USB_DETECT_ENABLE);
1143	else
1144		tmp &= ~(1 << USB_DETECT_ENABLE);
1145	net2272_write(dev, USBCTL0, tmp);
1146	spin_unlock_irqrestore(&dev->lock, flags);
1147
1148	return 0;
1149}
1150
1151static int net2272_start(struct usb_gadget *_gadget,
1152		struct usb_gadget_driver *driver);
1153static int net2272_stop(struct usb_gadget *_gadget);
1154static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable);
1155
1156static const struct usb_gadget_ops net2272_ops = {
1157	.get_frame	= net2272_get_frame,
1158	.wakeup		= net2272_wakeup,
1159	.set_selfpowered = net2272_set_selfpowered,
1160	.pullup		= net2272_pullup,
1161	.udc_start	= net2272_start,
1162	.udc_stop	= net2272_stop,
1163	.udc_async_callbacks = net2272_async_callbacks,
1164};
1165
1166/*---------------------------------------------------------------------------*/
1167
1168static ssize_t
1169registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1170{
1171	struct net2272 *dev;
1172	char *next;
1173	unsigned size, t;
1174	unsigned long flags;
1175	u8 t1, t2;
1176	int i;
1177	const char *s;
1178
1179	dev = dev_get_drvdata(_dev);
1180	next = buf;
1181	size = PAGE_SIZE;
1182	spin_lock_irqsave(&dev->lock, flags);
1183
1184	/* Main Control Registers */
1185	t = scnprintf(next, size, "%s version %s,"
1186		"chiprev %02x, locctl %02x\n"
1187		"irqenb0 %02x irqenb1 %02x "
1188		"irqstat0 %02x irqstat1 %02x\n",
1189		driver_name, driver_vers, dev->chiprev,
1190		net2272_read(dev, LOCCTL),
1191		net2272_read(dev, IRQENB0),
1192		net2272_read(dev, IRQENB1),
1193		net2272_read(dev, IRQSTAT0),
1194		net2272_read(dev, IRQSTAT1));
1195	size -= t;
1196	next += t;
1197
1198	/* DMA */
1199	t1 = net2272_read(dev, DMAREQ);
1200	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1201		t1, ep_name[(t1 & 0x01) + 1],
1202		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1203		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1204		t1 & (1 << DMA_REQUEST) ? "req " : "",
1205		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1206	size -= t;
1207	next += t;
1208
1209	/* USB Control Registers */
1210	t1 = net2272_read(dev, USBCTL1);
1211	if (t1 & (1 << VBUS_PIN)) {
1212		if (t1 & (1 << USB_HIGH_SPEED))
1213			s = "high speed";
1214		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1215			s = "powered";
1216		else
1217			s = "full speed";
1218	} else
1219		s = "not attached";
1220	t = scnprintf(next, size,
1221		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1222		net2272_read(dev, USBCTL0), t1,
1223		net2272_read(dev, OURADDR), s);
1224	size -= t;
1225	next += t;
1226
1227	/* Endpoint Registers */
1228	for (i = 0; i < 4; ++i) {
1229		struct net2272_ep *ep;
1230
1231		ep = &dev->ep[i];
1232		if (i && !ep->desc)
1233			continue;
1234
1235		t1 = net2272_ep_read(ep, EP_CFG);
1236		t2 = net2272_ep_read(ep, EP_RSPSET);
1237		t = scnprintf(next, size,
1238			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1239			"irqenb %02x\n",
1240			ep->ep.name, t1, t2,
1241			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1242			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1243			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1244			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1245			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1246			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1247			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1248			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1249			net2272_ep_read(ep, EP_IRQENB));
1250		size -= t;
1251		next += t;
1252
1253		t = scnprintf(next, size,
1254			"\tstat0 %02x stat1 %02x avail %04x "
1255			"(ep%d%s-%s)%s\n",
1256			net2272_ep_read(ep, EP_STAT0),
1257			net2272_ep_read(ep, EP_STAT1),
1258			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1259			t1 & 0x0f,
1260			ep->is_in ? "in" : "out",
1261			type_string(t1 >> 5),
1262			ep->stopped ? "*" : "");
1263		size -= t;
1264		next += t;
1265
1266		t = scnprintf(next, size,
1267			"\tep_transfer %06x\n",
1268			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1269			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1270			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1271		size -= t;
1272		next += t;
1273
1274		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1275		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1276		t = scnprintf(next, size,
1277			"\tbuf-a %s buf-b %s\n",
1278			buf_state_string(t1),
1279			buf_state_string(t2));
1280		size -= t;
1281		next += t;
1282	}
1283
1284	spin_unlock_irqrestore(&dev->lock, flags);
1285
1286	return PAGE_SIZE - size;
1287}
1288static DEVICE_ATTR_RO(registers);
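/*
 * A usage sketch (assumption: the exact sysfs path depends on the bus and
 * platform the controller is probed on): once the "registers" attribute has
 * been created for the device, the dump produced by registers_show() can be
 * read from userspace, e.g.
 *
 *	cat /sys/devices/platform/net2272/registers
 */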
1289
1290/*---------------------------------------------------------------------------*/
1291
1292static void
1293net2272_set_fifo_mode(struct net2272 *dev, int mode)
1294{
1295	u8 tmp;
1296
1297	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1298	tmp |= (mode << 6);
1299	net2272_write(dev, LOCCTL, tmp);
1300
1301	INIT_LIST_HEAD(&dev->gadget.ep_list);
1302
1303	/* always ep-a, ep-c ... maybe not ep-b */
1304	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1305
1306	switch (mode) {
1307	case 0:
1308		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1309		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1310		break;
1311	case 1:
1312		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1313		dev->ep[1].fifo_size = 1024;
1314		dev->ep[2].fifo_size = 512;
1315		break;
1316	case 2:
1317		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1318		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1319		break;
1320	case 3:
1321		dev->ep[1].fifo_size = 1024;
1322		break;
1323	}
1324
 1325	/* ep-c is always two 512-byte buffers */
1326	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1327	dev->ep[3].fifo_size = 512;
1328}
1329
1330/*---------------------------------------------------------------------------*/
1331
1332static void
1333net2272_usb_reset(struct net2272 *dev)
1334{
1335	dev->gadget.speed = USB_SPEED_UNKNOWN;
1336
1337	net2272_cancel_dma(dev);
1338
1339	net2272_write(dev, IRQENB0, 0);
1340	net2272_write(dev, IRQENB1, 0);
1341
1342	/* clear irq state */
1343	net2272_write(dev, IRQSTAT0, 0xff);
1344	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1345
1346	net2272_write(dev, DMAREQ,
1347		(0 << DMA_BUFFER_VALID) |
1348		(0 << DMA_REQUEST_ENABLE) |
1349		(1 << DMA_CONTROL_DACK) |
1350		(dev->dma_eot_polarity << EOT_POLARITY) |
1351		(dev->dma_dack_polarity << DACK_POLARITY) |
1352		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1353		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1354
1355	net2272_cancel_dma(dev);
1356	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1357
 1358	/* Set the NET2272 ep fifo data width to 16-bit mode.  For correct byte swapping,
 1359	 * note that the higher-level gadget drivers are expected to convert data to little endian.
 1360	 * Enable byte swap for your local bus/cpu, if needed, by setting BYTE_SWAP in LOCCTL here.
1361	 */
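	/* For illustration only (an assumption, needed only if the local
	 * bus/cpu byte order requires it); enabling the swap mentioned above
	 * would look like:
	 *
	 *	net2272_write(dev, LOCCTL,
	 *		net2272_read(dev, LOCCTL) | (1 << BYTE_SWAP));
	 */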
1362	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1363	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1364}
1365
1366static void
1367net2272_usb_reinit(struct net2272 *dev)
1368{
1369	int i;
1370
1371	/* basic endpoint init */
1372	for (i = 0; i < 4; ++i) {
1373		struct net2272_ep *ep = &dev->ep[i];
1374
1375		ep->ep.name = ep_name[i];
1376		ep->dev = dev;
1377		ep->num = i;
1378		ep->not_empty = 0;
1379
1380		if (use_dma && ep->num == dma_ep)
1381			ep->dma = 1;
1382
1383		if (i > 0 && i <= 3)
1384			ep->fifo_size = 512;
1385		else
1386			ep->fifo_size = 64;
1387		net2272_ep_reset(ep);
1388
1389		if (i == 0) {
1390			ep->ep.caps.type_control = true;
1391		} else {
1392			ep->ep.caps.type_iso = true;
1393			ep->ep.caps.type_bulk = true;
1394			ep->ep.caps.type_int = true;
1395		}
1396
1397		ep->ep.caps.dir_in = true;
1398		ep->ep.caps.dir_out = true;
1399	}
1400	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1401
1402	dev->gadget.ep0 = &dev->ep[0].ep;
1403	dev->ep[0].stopped = 0;
1404	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1405}
1406
1407static void
1408net2272_ep0_start(struct net2272 *dev)
1409{
1410	struct net2272_ep *ep0 = &dev->ep[0];
1411
1412	net2272_ep_write(ep0, EP_RSPSET,
1413		(1 << NAK_OUT_PACKETS_MODE) |
1414		(1 << ALT_NAK_OUT_PACKETS));
1415	net2272_ep_write(ep0, EP_RSPCLR,
1416		(1 << HIDE_STATUS_PHASE) |
1417		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1418	net2272_write(dev, USBCTL0,
1419		(dev->softconnect << USB_DETECT_ENABLE) |
1420		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1421		(1 << IO_WAKEUP_ENABLE));
1422	net2272_write(dev, IRQENB0,
1423		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1424		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1425		(1 << DMA_DONE_INTERRUPT_ENABLE));
1426	net2272_write(dev, IRQENB1,
1427		(1 << VBUS_INTERRUPT_ENABLE) |
1428		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1429		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1430}
1431
1432/* when a driver is successfully registered, it will receive
1433 * control requests including set_configuration(), which enables
1434 * non-control requests.  then usb traffic follows until a
1435 * disconnect is reported.  then a host may connect again, or
1436 * the driver might get unbound.
1437 */
1438static int net2272_start(struct usb_gadget *_gadget,
1439		struct usb_gadget_driver *driver)
1440{
1441	struct net2272 *dev;
1442	unsigned i;
1443
1444	if (!driver || !driver->setup ||
1445	    driver->max_speed != USB_SPEED_HIGH)
1446		return -EINVAL;
1447
1448	dev = container_of(_gadget, struct net2272, gadget);
1449
1450	for (i = 0; i < 4; ++i)
1451		dev->ep[i].irqs = 0;
1452	/* hook up the driver ... */
1453	dev->softconnect = 1;
1454	dev->driver = driver;
1455
1456	/* ... then enable host detection and ep0; and we're ready
1457	 * for set_configuration as well as eventual disconnect.
1458	 */
1459	net2272_ep0_start(dev);
1460
1461	return 0;
1462}
1463
1464static void
1465stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1466{
1467	int i;
1468
1469	/* don't disconnect if it's not connected */
1470	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1471		driver = NULL;
1472
1473	/* stop hardware; prevent new request submissions;
1474	 * and kill any outstanding requests.
1475	 */
1476	net2272_usb_reset(dev);
1477	for (i = 0; i < 4; ++i)
1478		net2272_dequeue_all(&dev->ep[i]);
1479
1480	/* report disconnect; the driver is already quiesced */
1481	if (dev->async_callbacks && driver) {
1482		spin_unlock(&dev->lock);
1483		driver->disconnect(&dev->gadget);
1484		spin_lock(&dev->lock);
1485	}
1486
1487	net2272_usb_reinit(dev);
1488}
1489
1490static int net2272_stop(struct usb_gadget *_gadget)
1491{
1492	struct net2272 *dev;
1493	unsigned long flags;
1494
1495	dev = container_of(_gadget, struct net2272, gadget);
1496
1497	spin_lock_irqsave(&dev->lock, flags);
1498	stop_activity(dev, NULL);
1499	spin_unlock_irqrestore(&dev->lock, flags);
1500
1501	dev->driver = NULL;
1502
1503	return 0;
1504}
1505
1506static void net2272_async_callbacks(struct usb_gadget *_gadget, bool enable)
1507{
1508	struct net2272	*dev = container_of(_gadget, struct net2272, gadget);
1509
1510	spin_lock_irq(&dev->lock);
1511	dev->async_callbacks = enable;
1512	spin_unlock_irq(&dev->lock);
1513}
1514
1515/*---------------------------------------------------------------------------*/
1516/* handle ep-a/ep-b dma completions */
1517static void
1518net2272_handle_dma(struct net2272_ep *ep)
1519{
1520	struct net2272_request *req;
1521	unsigned len;
1522	int status;
1523
1524	if (!list_empty(&ep->queue))
1525		req = list_entry(ep->queue.next,
1526				struct net2272_request, queue);
1527	else
1528		req = NULL;
1529
1530	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1531
1532	/* Ensure DREQ is de-asserted */
1533	net2272_write(ep->dev, DMAREQ,
1534		(0 << DMA_BUFFER_VALID)
1535	      | (0 << DMA_REQUEST_ENABLE)
1536	      | (1 << DMA_CONTROL_DACK)
1537	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1538	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1539	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1540	      | (ep->dma << DMA_ENDPOINT_SELECT));
1541
1542	ep->dev->dma_busy = 0;
1543
1544	net2272_ep_write(ep, EP_IRQENB,
1545		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1546		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1547		| net2272_ep_read(ep, EP_IRQENB));
1548
1549	/* device-to-host transfer completed */
1550	if (ep->is_in) {
1551		/* validate a short packet or zlp if necessary */
1552		if ((req->req.length % ep->ep.maxpacket != 0) ||
1553				req->req.zero)
1554			set_fifo_bytecount(ep, 0);
1555
1556		net2272_done(ep, req, 0);
1557		if (!list_empty(&ep->queue)) {
1558			req = list_entry(ep->queue.next,
1559					struct net2272_request, queue);
1560			status = net2272_kick_dma(ep, req);
1561			if (status < 0)
1562				net2272_pio_advance(ep);
1563		}
1564
1565	/* host-to-device transfer completed */
1566	} else {
1567		/* terminated with a short packet? */
1568		if (net2272_read(ep->dev, IRQSTAT0) &
1569				(1 << DMA_DONE_INTERRUPT)) {
1570			/* abort system dma */
1571			net2272_cancel_dma(ep->dev);
1572		}
1573
1574		/* EP_TRANSFER will contain the number of bytes
1575		 * actually received.
1576		 * NOTE: There is no overflow detection on EP_TRANSFER:
1577		 * We can't deal with transfers larger than 2^24 bytes!
1578		 */
1579		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1580			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1581			| (net2272_ep_read(ep, EP_TRANSFER0));
1582
1583		if (ep->not_empty)
1584			len += 4;
1585
1586		req->req.actual += len;
1587
1588		/* get any remaining data */
1589		net2272_pio_advance(ep);
1590	}
1591}
1592
1593/*---------------------------------------------------------------------------*/
1594
1595static void
1596net2272_handle_ep(struct net2272_ep *ep)
1597{
1598	struct net2272_request *req;
1599	u8 stat0, stat1;
1600
1601	if (!list_empty(&ep->queue))
1602		req = list_entry(ep->queue.next,
1603			struct net2272_request, queue);
1604	else
1605		req = NULL;
1606
1607	/* ack all, and handle what we care about */
1608	stat0 = net2272_ep_read(ep, EP_STAT0);
1609	stat1 = net2272_ep_read(ep, EP_STAT1);
1610	ep->irqs++;
1611
1612	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1613		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1614
1615	net2272_ep_write(ep, EP_STAT0, stat0 &
1616		~((1 << NAK_OUT_PACKETS)
1617		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1618	net2272_ep_write(ep, EP_STAT1, stat1);
1619
1620	/* data packet(s) received (in the fifo, OUT)
1621	 * direction must be validated, otherwise control read status phase
1622	 * could be interpreted as a valid packet
1623	 */
1624	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1625		net2272_pio_advance(ep);
1626	/* data packet(s) transmitted (IN) */
1627	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1628		net2272_pio_advance(ep);
1629}
1630
1631static struct net2272_ep *
1632net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1633{
1634	struct net2272_ep *ep;
1635
1636	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1637		return &dev->ep[0];
1638
1639	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1640		u8 bEndpointAddress;
1641
1642		if (!ep->desc)
1643			continue;
1644		bEndpointAddress = ep->desc->bEndpointAddress;
1645		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1646			continue;
1647		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1648			return ep;
1649	}
1650	return NULL;
1651}
1652
1653/*
1654 * USB Test Packet:
1655 * JKJKJKJK * 9
1656 * JJKKJJKK * 8
1657 * JJJJKKKK * 8
1658 * JJJJJJJKKKKKKK * 8
1659 * JJJJJJJK * 8
1660 * {JKKKKKKK * 10}, JK
1661 */
1662static const u8 net2272_test_packet[] = {
1663	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1664	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1665	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1666	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1667	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1668	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1669};
1670
1671static void
1672net2272_set_test_mode(struct net2272 *dev, int mode)
1673{
1674	int i;
1675
1676	/* Disable all net2272 interrupts:
1677	 * Nothing but a power cycle should stop the test.
1678	 */
1679	net2272_write(dev, IRQENB0, 0x00);
1680	net2272_write(dev, IRQENB1, 0x00);
1681
 1682	/* Force transceiver to high-speed */
1683	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1684
1685	net2272_write(dev, PAGESEL, 0);
1686	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1687	net2272_write(dev, EP_RSPCLR,
1688			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1689			| (1 << HIDE_STATUS_PHASE));
1690	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1691	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1692
1693	/* wait for status phase to complete */
1694	while (!(net2272_read(dev, EP_STAT0) &
1695				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1696		;
1697
1698	/* Enable test mode */
1699	net2272_write(dev, USBTEST, mode);
1700
1701	/* load test packet */
1702	if (mode == USB_TEST_PACKET) {
1703		/* switch to 8 bit mode */
1704		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1705				~(1 << DATA_WIDTH));
1706
1707		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1708			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1709
1710		/* Validate test packet */
1711		net2272_write(dev, EP_TRANSFER0, 0);
1712	}
1713}
1714
1715static void
1716net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1717{
1718	struct net2272_ep *ep;
1719	u8 num, scratch;
1720
1721	/* starting a control request? */
1722	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1723		union {
1724			u8 raw[8];
1725			struct usb_ctrlrequest	r;
1726		} u;
1727		int tmp = 0;
1728		struct net2272_request *req;
1729
1730		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1731			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1732				dev->gadget.speed = USB_SPEED_HIGH;
1733			else
1734				dev->gadget.speed = USB_SPEED_FULL;
1735			dev_dbg(dev->dev, "%s\n",
1736				usb_speed_string(dev->gadget.speed));
1737		}
1738
1739		ep = &dev->ep[0];
1740		ep->irqs++;
1741
1742		/* make sure any leftover interrupt state is cleared */
1743		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1744		while (!list_empty(&ep->queue)) {
1745			req = list_entry(ep->queue.next,
1746				struct net2272_request, queue);
1747			net2272_done(ep, req,
1748				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1749		}
1750		ep->stopped = 0;
1751		dev->protocol_stall = 0;
1752		net2272_ep_write(ep, EP_STAT0,
1753			    (1 << DATA_IN_TOKEN_INTERRUPT)
1754			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1755			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1756			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1757			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1758		net2272_ep_write(ep, EP_STAT1,
1759			    (1 << TIMEOUT)
1760			  | (1 << USB_OUT_ACK_SENT)
1761			  | (1 << USB_OUT_NAK_SENT)
1762			  | (1 << USB_IN_ACK_RCVD)
1763			  | (1 << USB_IN_NAK_SENT)
1764			  | (1 << USB_STALL_SENT)
1765			  | (1 << LOCAL_OUT_ZLP));
1766
1767		/*
1768		 * Ensure Control Read pre-validation setting is beyond maximum size
1769		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1770		 *    an EP0 transfer following the Control Write is a Control Read,
1771		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1772		 *    pre-validation count.
1773		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
 1774		 *    the pre-validation count cannot cause an unexpected validation
1775		 */
1776		net2272_write(dev, PAGESEL, 0);
1777		net2272_write(dev, EP_TRANSFER2, 0xff);
1778		net2272_write(dev, EP_TRANSFER1, 0xff);
1779		net2272_write(dev, EP_TRANSFER0, 0xff);
1780
1781		u.raw[0] = net2272_read(dev, SETUP0);
1782		u.raw[1] = net2272_read(dev, SETUP1);
1783		u.raw[2] = net2272_read(dev, SETUP2);
1784		u.raw[3] = net2272_read(dev, SETUP3);
1785		u.raw[4] = net2272_read(dev, SETUP4);
1786		u.raw[5] = net2272_read(dev, SETUP5);
1787		u.raw[6] = net2272_read(dev, SETUP6);
1788		u.raw[7] = net2272_read(dev, SETUP7);
1789		/*
 1790		 * If you have a big-endian cpu, make sure le16_to_cpus
1791		 * performs the proper byte swapping here...
1792		 */
1793		le16_to_cpus(&u.r.wValue);
1794		le16_to_cpus(&u.r.wIndex);
1795		le16_to_cpus(&u.r.wLength);
1796
1797		/* ack the irq */
1798		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1799		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1800
1801		/* watch control traffic at the token level, and force
1802		 * synchronization before letting the status phase happen.
1803		 */
1804		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1805		if (ep->is_in) {
1806			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1807				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1808				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1809			stop_out_naking(ep);
1810		} else
1811			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1812				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1813				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1814		net2272_ep_write(ep, EP_IRQENB, scratch);
1815
1816		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1817			goto delegate;
1818		switch (u.r.bRequest) {
1819		case USB_REQ_GET_STATUS: {
1820			struct net2272_ep *e;
1821			u16 status = 0;
1822
1823			switch (u.r.bRequestType & USB_RECIP_MASK) {
1824			case USB_RECIP_ENDPOINT:
1825				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1826				if (!e || u.r.wLength > 2)
1827					goto do_stall;
1828				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1829					status = cpu_to_le16(1);
1830				else
1831					status = cpu_to_le16(0);
1832
1833				/* don't bother with a request object! */
1834				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1835				writew(status, net2272_reg_addr(dev, EP_DATA));
1836				set_fifo_bytecount(&dev->ep[0], 0);
1837				allow_status(ep);
1838				dev_vdbg(dev->dev, "%s stat %02x\n",
1839					ep->ep.name, status);
1840				goto next_endpoints;
1841			case USB_RECIP_DEVICE:
1842				if (u.r.wLength > 2)
1843					goto do_stall;
1844				if (dev->gadget.is_selfpowered)
1845					status = (1 << USB_DEVICE_SELF_POWERED);
1846
1847				/* don't bother with a request object! */
1848				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1849				writew(status, net2272_reg_addr(dev, EP_DATA));
1850				set_fifo_bytecount(&dev->ep[0], 0);
1851				allow_status(ep);
1852				dev_vdbg(dev->dev, "device stat %02x\n", status);
1853				goto next_endpoints;
1854			case USB_RECIP_INTERFACE:
1855				if (u.r.wLength > 2)
1856					goto do_stall;
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "interface status %02x\n", status);
1864				goto next_endpoints;
1865			}
1866
1867			break;
1868		}
1869		case USB_REQ_CLEAR_FEATURE: {
1870			struct net2272_ep *e;
1871
1872			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1873				goto delegate;
1874			if (u.r.wValue != USB_ENDPOINT_HALT ||
1875			    u.r.wLength != 0)
1876				goto do_stall;
1877			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1878			if (!e)
1879				goto do_stall;
1880			if (e->wedged) {
1881				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1882					ep->ep.name);
1883			} else {
1884				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1885				clear_halt(e);
1886			}
1887			allow_status(ep);
1888			goto next_endpoints;
1889		}
1890		case USB_REQ_SET_FEATURE: {
1891			struct net2272_ep *e;
1892
1893			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1894				if (u.r.wIndex != NORMAL_OPERATION)
1895					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1896				allow_status(ep);
1897				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1898				goto next_endpoints;
1899			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1900				goto delegate;
1901			if (u.r.wValue != USB_ENDPOINT_HALT ||
1902			    u.r.wLength != 0)
1903				goto do_stall;
1904			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1905			if (!e)
1906				goto do_stall;
1907			set_halt(e);
1908			allow_status(ep);
1909			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1910			goto next_endpoints;
1911		}
1912		case USB_REQ_SET_ADDRESS: {
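			/* latch the new device address into OURADDR and ack the status stage */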
1913			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1914			allow_status(ep);
1915			break;
1916		}
1917		default:
1918 delegate:
1919			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1920				"ep_cfg %08x\n",
1921				u.r.bRequestType, u.r.bRequest,
1922				u.r.wValue, u.r.wIndex,
1923				net2272_ep_read(ep, EP_CFG));
1924			if (dev->async_callbacks) {
1925				spin_unlock(&dev->lock);
1926				tmp = dev->driver->setup(&dev->gadget, &u.r);
1927				spin_lock(&dev->lock);
1928			}
1929		}
1930
1931		/* stall ep0 on error */
1932		if (tmp < 0) {
1933 do_stall:
1934			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1935				u.r.bRequestType, u.r.bRequest, tmp);
1936			dev->protocol_stall = 1;
1937		}
1938	/* endpoint dma irq? */
1939	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1940		net2272_cancel_dma(dev);
1941		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1942		stat &= ~(1 << DMA_DONE_INTERRUPT);
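		/*
		 * The DMA_ENDPOINT_SELECT bit in DMAREQ identifies which endpoint
		 * owns the single DMA channel: ep-b (dev->ep[2]) when set, else
		 * ep-a (dev->ep[1]).
		 */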
1943		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1944			? 2 : 1;
1945
1946		ep = &dev->ep[num];
1947		net2272_handle_dma(ep);
1948	}
1949
1950 next_endpoints:
1951	/* endpoint data irq? */
1952	scratch = stat & 0x0f;
1953	stat &= ~0x0f;
1954	for (num = 0; scratch; num++) {
1955		u8 t;
1956
1957		/* does this endpoint's FIFO and queue need tending? */
1958		t = 1 << num;
1959		if ((scratch & t) == 0)
1960			continue;
1961		scratch ^= t;
1962
1963		ep = &dev->ep[num];
1964		net2272_handle_ep(ep);
1965	}
1966
1967	/* some interrupts we can just ignore */
1968	stat &= ~(1 << SOF_INTERRUPT);
1969
1970	if (stat)
1971		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1972}
1973
1974static void
1975net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1976{
1977	u8 tmp, mask;
1978
1979	/* after disconnect there's nothing else to do! */
1980	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1981	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1982
1983	if (stat & tmp) {
1984		bool	reset = false;
1985		bool	disconnect = false;
1986
1987		/*
1988		 * Ignore disconnects and resets if the speed hasn't been set.
1989		 * VBUS can bounce and there's always an initial reset.
1990		 */
1991		net2272_write(dev, IRQSTAT1, tmp);
1992		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
1993			if ((stat & (1 << VBUS_INTERRUPT)) &&
1994					(net2272_read(dev, USBCTL1) &
1995						(1 << VBUS_PIN)) == 0) {
1996				disconnect = true;
1997				dev_dbg(dev->dev, "disconnect %s\n",
1998					dev->driver->driver.name);
1999			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2000					(net2272_read(dev, USBCTL1) & mask)
2001						== 0) {
2002				reset = true;
2003				dev_dbg(dev->dev, "reset %s\n",
2004					dev->driver->driver.name);
2005			}
2006
2007			if (disconnect || reset) {
2008				stop_activity(dev, dev->driver);
2009				net2272_ep0_start(dev);
2010				if (dev->async_callbacks) {
2011					spin_unlock(&dev->lock);
2012					if (reset)
2013						usb_gadget_udc_reset(&dev->gadget, dev->driver);
2014					else
2015						(dev->driver->disconnect)(&dev->gadget);
2016					spin_lock(&dev->lock);
2017				}
2018				return;
2019			}
2020		}
2021		stat &= ~tmp;
2022
2023		if (!stat)
2024			return;
2025	}
2026
2027	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2028	if (stat & tmp) {
2029		net2272_write(dev, IRQSTAT1, tmp);
2030		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2031			if (dev->async_callbacks && dev->driver->suspend)
2032				dev->driver->suspend(&dev->gadget);
2033			if (!enable_suspend) {
2034				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2035				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2036			}
2037		} else {
2038			if (dev->async_callbacks && dev->driver->resume)
2039				dev->driver->resume(&dev->gadget);
2040		}
2041		stat &= ~tmp;
2042	}
2043
2044	/* clear any other status/irqs */
2045	if (stat)
2046		net2272_write(dev, IRQSTAT1, stat);
2047
2048	/* some status we can just ignore */
2049	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2050			| (1 << SUSPEND_REQUEST_INTERRUPT)
2051			| (1 << RESUME_INTERRUPT));
2052	if (!stat)
2053		return;
2054	else
2055		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2056}
2057
2058static irqreturn_t net2272_irq(int irq, void *_dev)
2059{
2060	struct net2272 *dev = _dev;
2061#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2062	u32 intcsr;
2063#endif
2064#if defined(PLX_PCI_RDK)
2065	u8 dmareq;
2066#endif
2067	spin_lock(&dev->lock);
2068#if defined(PLX_PCI_RDK)
2069	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2070
2071	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2072		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2073				dev->rdk1.plx9054_base_addr + INTCSR);
2074		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2075		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2076		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2077		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2078			dev->rdk1.plx9054_base_addr + INTCSR);
2079	}
2080	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2081		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2082				dev->rdk1.plx9054_base_addr + DMACSR0);
2083
2084		dmareq = net2272_read(dev, DMAREQ);
2085		if (dmareq & 0x01)
2086			net2272_handle_dma(&dev->ep[2]);
2087		else
2088			net2272_handle_dma(&dev->ep[1]);
2089	}
2090#endif
2091#if defined(PLX_PCI_RDK2)
2092	/* see if this PCI interrupt is ours by checking irqstat */
2093	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2094	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2095		spin_unlock(&dev->lock);
2096		return IRQ_NONE;
2097	}
2098	/* check dma interrupts */
2099#endif
2100	/* Platform/device interrupt handler */
2101#if !defined(PLX_PCI_RDK)
2102	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2103	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2104#endif
2105	spin_unlock(&dev->lock);
2106
2107	return IRQ_HANDLED;
2108}
2109
2110static int net2272_present(struct net2272 *dev)
2111{
2112	/*
2113	 * Quick test to see if CPU can communicate properly with the NET2272.
2114	 * Verifies connection using writes and reads to write/read and
2115	 * read-only registers.
2116	 *
2117	 * This routine is strongly recommended, especially during early bring-up
2118	 * of new hardware; however, for designs that do not apply Power On System
2119	 * Tests (POST) it may be discarded (or perhaps minimized).
2120	 */
2121	unsigned int ii;
2122	u8 val, refval;
2123
2124	/* Verify NET2272 write/read SCRATCH register can write and read */
2125	refval = net2272_read(dev, SCRATCH);
2126	for (ii = 0; ii < 0x100; ii += 7) {
2127		net2272_write(dev, SCRATCH, ii);
2128		val = net2272_read(dev, SCRATCH);
2129		if (val != ii) {
2130			dev_dbg(dev->dev,
2131				"%s: write/read SCRATCH register test failed: "
2132				"wrote:0x%2.2x, read:0x%2.2x\n",
2133				__func__, ii, val);
2134			return -EINVAL;
2135		}
2136	}
2137	/* To be nice, we write the original SCRATCH value back: */
2138	net2272_write(dev, SCRATCH, refval);
2139
2140	/* Verify NET2272 CHIPREV register is read-only: */
2141	refval = net2272_read(dev, CHIPREV_2272);
2142	for (ii = 0; ii < 0x100; ii += 7) {
2143		net2272_write(dev, CHIPREV_2272, ii);
2144		val = net2272_read(dev, CHIPREV_2272);
2145		if (val != refval) {
2146			dev_dbg(dev->dev,
2147				"%s: write/read CHIPREV register test failed: "
2148				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2149				__func__, ii, val, refval);
2150			return -EINVAL;
2151		}
2152	}
2153
2154	/*
2155	 * Verify NET2272's "NET2270 legacy revision" register
2156	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2157	 *    register should read the same value, regardless of the NET2272
2158	 *    silicon revision.  The legacy register applies to NET2270
2159	 *    firmware being applied to the NET2272.
2160	 */
2161	val = net2272_read(dev, CHIPREV_LEGACY);
2162	if (val != NET2270_LEGACY_REV) {
2163		/*
2164		 * Unexpected legacy revision value
2165		 * - Perhaps the chip is a NET2270?
2166		 */
2167		dev_dbg(dev->dev,
2168			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2169			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2170			__func__, NET2270_LEGACY_REV, val);
2171		return -EINVAL;
2172	}
2173
2174	/*
2175	 * Verify NET2272 silicon revision
2176	 *  - This revision register is appropriate for the silicon version
2177	 *    of the NET2272
2178	 */
2179	val = net2272_read(dev, CHIPREV_2272);
2180	switch (val) {
2181	case CHIPREV_NET2272_R1:
2182		/*
2183		 * NET2272 Rev 1 has DMA related errata:
2184		 *  - Newer silicon (Rev 1A or better) required
2185		 */
2186		dev_dbg(dev->dev,
2187			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2188			__func__);
2189		break;
2190	case CHIPREV_NET2272_R1A:
2191		break;
2192	default:
2193		/* NET2272 silicon version *may* not work with this firmware */
2194		dev_dbg(dev->dev,
2195			"%s: unexpected silicon revision register value: "
2196			" CHIPREV_2272: 0x%2.2x\n",
2197			__func__, val);
2198		/*
2199		 * Return Success, even though the chip rev is not an expected value
2200		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2201		 *  - Often, new silicon is perfectly compatible
2202		 */
2203	}
2204
2205	/* Success: NET2272 checks out OK */
2206	return 0;
2207}
2208
2209static void
2210net2272_gadget_release(struct device *_dev)
2211{
2212	struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev);
2213
2214	kfree(dev);
2215}
2216
2217/*---------------------------------------------------------------------------*/
2218
2219static void
2220net2272_remove(struct net2272 *dev)
2221{
2222	if (dev->added)
2223		usb_del_gadget(&dev->gadget);
2224	free_irq(dev->irq, dev);
2225	iounmap(dev->base_addr);
2226	device_remove_file(dev->dev, &dev_attr_registers);
2227
2228	dev_info(dev->dev, "unbind\n");
2229}
2230
2231static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2232{
2233	struct net2272 *ret;
2234
2235	if (!irq) {
2236		dev_dbg(dev, "No IRQ!\n");
2237		return ERR_PTR(-ENODEV);
2238	}
2239
2240	/* alloc, and start init */
2241	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2242	if (!ret)
2243		return ERR_PTR(-ENOMEM);
2244
2245	spin_lock_init(&ret->lock);
2246	ret->irq = irq;
2247	ret->dev = dev;
2248	ret->gadget.ops = &net2272_ops;
2249	ret->gadget.max_speed = USB_SPEED_HIGH;
2250
2251	/* the "gadget" abstracts/virtualizes the controller */
2252	ret->gadget.name = driver_name;
2253	usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release);
2254
2255	return ret;
2256}
2257
2258static int
2259net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2260{
2261	int ret;
2262
2263	/* See if the NET2272 is actually there... */
2264	if (net2272_present(dev)) {
2265		dev_warn(dev->dev, "2272 not found!\n");
2266		ret = -ENODEV;
2267		goto err;
2268	}
2269
2270	net2272_usb_reset(dev);
2271	net2272_usb_reinit(dev);
2272
2273	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2274	if (ret) {
2275		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2276		goto err;
2277	}
2278
2279	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2280
2281	/* done */
2282	dev_info(dev->dev, "%s\n", driver_desc);
2283	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2284		dev->irq, dev->base_addr, dev->chiprev,
2285		dma_mode_string());
2286	dev_info(dev->dev, "version: %s\n", driver_vers);
2287
2288	ret = device_create_file(dev->dev, &dev_attr_registers);
2289	if (ret)
2290		goto err_irq;
2291
2292	ret = usb_add_gadget(&dev->gadget);
2293	if (ret)
2294		goto err_add_udc;
2295	dev->added = 1;
2296
2297	return 0;
2298
2299err_add_udc:
2300	device_remove_file(dev->dev, &dev_attr_registers);
2301 err_irq:
2302	free_irq(dev->irq, dev);
2303 err:
2304	return ret;
2305}
2306
2307#ifdef CONFIG_USB_PCI
2308
2309/*
2310 * wrap this driver around the specified device, but
2311 * don't respond over USB until a gadget driver binds to us
2312 */
2313
2314static int
2315net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2316{
2317	unsigned long resource, len, tmp;
2318	void __iomem *mem_mapped_addr[4];
2319	int ret, i;
2320
2321	/*
2322	 * BAR 0 holds PLX 9054 config registers
2323	 * BAR 1 is i/o memory; unused here
2324	 * BAR 2 holds EPLD config registers
2325	 * BAR 3 holds NET2272 registers
2326	 */
2327
2328	/* Find and map all address spaces */
2329	for (i = 0; i < 4; ++i) {
2330		if (i == 1)
2331			continue;	/* BAR1 unused */
2332
2333		resource = pci_resource_start(pdev, i);
2334		len = pci_resource_len(pdev, i);
2335
2336		if (!request_mem_region(resource, len, driver_name)) {
2337			dev_dbg(dev->dev, "controller already in use\n");
2338			ret = -EBUSY;
2339			goto err;
2340		}
2341
2342		mem_mapped_addr[i] = ioremap(resource, len);
2343		if (mem_mapped_addr[i] == NULL) {
2344			release_mem_region(resource, len);
2345			dev_dbg(dev->dev, "can't map memory\n");
2346			ret = -EFAULT;
2347			goto err;
2348		}
2349	}
2350
2351	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2352	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2353	dev->base_addr = mem_mapped_addr[3];
2354
2355	/* Set PLX 9054 bus width (16 bits) */
2356	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2357	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2358			dev->rdk1.plx9054_base_addr + LBRD1);
2359
2360	/* Enable PLX 9054 Interrupts */
2361	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2362			(1 << PCI_INTERRUPT_ENABLE) |
2363			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2364			dev->rdk1.plx9054_base_addr + INTCSR);
2365
2366	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2367			dev->rdk1.plx9054_base_addr + DMACSR0);
2368
2369	/* reset */
2370	writeb((1 << EPLD_DMA_ENABLE) |
2371		(1 << DMA_CTL_DACK) |
2372		(1 << DMA_TIMEOUT_ENABLE) |
2373		(1 << USER) |
2374		(0 << MPX_MODE) |
2375		(1 << BUSWIDTH) |
2376		(1 << NET2272_RESET),
2377		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2378
2379	mb();
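	/* deassert NET2272_RESET, then allow the chip time to come out of reset */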
2380	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2381		~(1 << NET2272_RESET),
2382		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2383	udelay(200);
2384
2385	return 0;
2386
2387 err:
2388	while (--i >= 0) {
2389		if (i == 1)
2390			continue;	/* BAR1 unused */
2391		iounmap(mem_mapped_addr[i]);
2392		release_mem_region(pci_resource_start(pdev, i),
2393			pci_resource_len(pdev, i));
2394	}
2395
2396	return ret;
2397}
2398
2399static int
2400net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2401{
2402	unsigned long resource, len;
2403	void __iomem *mem_mapped_addr[2];
2404	int ret, i;
2405
2406	/*
2407	 * BAR 0 holds FPGA config registers
2408	 * BAR 1 holds NET2272 registers
2409	 */
2410
2411	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2412	for (i = 0; i < 2; ++i) {
2413		resource = pci_resource_start(pdev, i);
2414		len = pci_resource_len(pdev, i);
2415
2416		if (!request_mem_region(resource, len, driver_name)) {
2417			dev_dbg(dev->dev, "controller already in use\n");
2418			ret = -EBUSY;
2419			goto err;
2420		}
2421
2422		mem_mapped_addr[i] = ioremap(resource, len);
2423		if (mem_mapped_addr[i] == NULL) {
2424			release_mem_region(resource, len);
2425			dev_dbg(dev->dev, "can't map memory\n");
2426			ret = -EFAULT;
2427			goto err;
2428		}
2429	}
2430
2431	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2432	dev->base_addr = mem_mapped_addr[1];
2433
2434	mb();
2435	/* Set 2272 bus width (16 bits) and reset */
2436	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2437	udelay(200);
2438	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2439	/* Print fpga version number */
2440	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2441		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2442	/* Enable FPGA Interrupts */
2443	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2444
2445	return 0;
2446
2447 err:
2448	while (--i >= 0) {
2449		iounmap(mem_mapped_addr[i]);
2450		release_mem_region(pci_resource_start(pdev, i),
2451			pci_resource_len(pdev, i));
2452	}
2453
2454	return ret;
2455}
2456
2457static int
2458net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2459{
2460	struct net2272 *dev;
2461	int ret;
2462
2463	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2464	if (IS_ERR(dev))
2465		return PTR_ERR(dev);
2466	dev->dev_id = pdev->device;
2467
2468	if (pci_enable_device(pdev) < 0) {
2469		ret = -ENODEV;
2470		goto err_put;
2471	}
2472
2473	pci_set_master(pdev);
2474
2475	switch (pdev->device) {
2476	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2477	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
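	/* pci_ids[] only matches the RDK1 and RDK2 cards, so any other id here is a driver bug */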
2478	default: BUG();
2479	}
2480	if (ret)
2481		goto err_pci;
2482
2483	ret = net2272_probe_fin(dev, 0);
2484	if (ret)
2485		goto err_pci;
2486
2487	pci_set_drvdata(pdev, dev);
2488
2489	return 0;
2490
2491 err_pci:
2492	pci_disable_device(pdev);
2493 err_put:
2494	usb_put_gadget(&dev->gadget);
2495
2496	return ret;
2497}
2498
2499static void
2500net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2501{
2502	int i;
2503
2504	/* disable PLX 9054 interrupts */
2505	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2506		~(1 << PCI_INTERRUPT_ENABLE),
2507		dev->rdk1.plx9054_base_addr + INTCSR);
2508
2509	/* clean up resources allocated during probe() */
2510	iounmap(dev->rdk1.plx9054_base_addr);
2511	iounmap(dev->rdk1.epld_base_addr);
2512
2513	for (i = 0; i < 4; ++i) {
2514		if (i == 1)
2515			continue;	/* BAR1 unused */
2516		release_mem_region(pci_resource_start(pdev, i),
2517			pci_resource_len(pdev, i));
2518	}
2519}
2520
2521static void
2522net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2523{
2524	int i;
2525
2526	/* disable fpga interrupts
2527	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2528			~(1 << PCI_INTERRUPT_ENABLE),
2529			dev->rdk1.plx9054_base_addr + INTCSR);
2530	*/
2531
2532	/* clean up resources allocated during probe() */
2533	iounmap(dev->rdk2.fpga_base_addr);
2534
2535	for (i = 0; i < 2; ++i)
2536		release_mem_region(pci_resource_start(pdev, i),
2537			pci_resource_len(pdev, i));
2538}
2539
2540static void
2541net2272_pci_remove(struct pci_dev *pdev)
2542{
2543	struct net2272 *dev = pci_get_drvdata(pdev);
2544
2545	net2272_remove(dev);
2546
2547	switch (pdev->device) {
2548	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2549	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2550	default: BUG();
2551	}
2552
2553	pci_disable_device(pdev);
2554
2555	usb_put_gadget(&dev->gadget);
2556}
2557
2558/* Table of matching PCI IDs */
2559static struct pci_device_id pci_ids[] = {
2560	{	/* RDK 1 card */
2561		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2562		.class_mask  = 0,
2563		.vendor      = PCI_VENDOR_ID_PLX,
2564		.device      = PCI_DEVICE_ID_RDK1,
2565		.subvendor   = PCI_ANY_ID,
2566		.subdevice   = PCI_ANY_ID,
2567	},
2568	{	/* RDK 2 card */
2569		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2570		.class_mask  = 0,
2571		.vendor      = PCI_VENDOR_ID_PLX,
2572		.device      = PCI_DEVICE_ID_RDK2,
2573		.subvendor   = PCI_ANY_ID,
2574		.subdevice   = PCI_ANY_ID,
2575	},
2576	{ }
2577};
2578MODULE_DEVICE_TABLE(pci, pci_ids);
2579
2580static struct pci_driver net2272_pci_driver = {
2581	.name     = driver_name,
2582	.id_table = pci_ids,
2583
2584	.probe    = net2272_pci_probe,
2585	.remove   = net2272_pci_remove,
2586};
2587
2588static int net2272_pci_register(void)
2589{
2590	return pci_register_driver(&net2272_pci_driver);
2591}
2592
2593static void net2272_pci_unregister(void)
2594{
2595	pci_unregister_driver(&net2272_pci_driver);
2596}
2597
2598#else
2599static inline int net2272_pci_register(void) { return 0; }
2600static inline void net2272_pci_unregister(void) { }
2601#endif
2602
2603/*---------------------------------------------------------------------------*/
2604
2605static int
2606net2272_plat_probe(struct platform_device *pdev)
2607{
2608	struct net2272 *dev;
2609	int ret;
2610	unsigned int irqflags;
2611	resource_size_t base, len;
2612	struct resource *iomem, *iomem_bus, *irq_res;
2613
2614	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2615	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2616	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2617	if (!irq_res || !iomem) {
2618		dev_err(&pdev->dev, "must provide irq/base addr\n");
2619		return -EINVAL;
2620	}
2621
2622	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2623	if (IS_ERR(dev))
2624		return PTR_ERR(dev);
2625
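	/* translate the IRQ resource's edge/level polarity into request_irq() trigger flags */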
2626	irqflags = 0;
2627	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2628		irqflags |= IRQF_TRIGGER_RISING;
2629	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2630		irqflags |= IRQF_TRIGGER_FALLING;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2632		irqflags |= IRQF_TRIGGER_HIGH;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2634		irqflags |= IRQF_TRIGGER_LOW;
2635
2636	base = iomem->start;
2637	len = resource_size(iomem);
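	/*
	 * An optional IORESOURCE_BUS entry supplies dev->base_shift, the address
	 * shift applied to register offsets by the register accessors in net2272.h.
	 */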
2638	if (iomem_bus)
2639		dev->base_shift = iomem_bus->start;
2640
2641	if (!request_mem_region(base, len, driver_name)) {
2642		dev_dbg(dev->dev, "can't request memory region!\n");
2643		ret = -EBUSY;
2644		goto err;
2645	}
2646	dev->base_addr = ioremap(base, len);
2647	if (!dev->base_addr) {
2648		dev_dbg(dev->dev, "can't map memory\n");
2649		ret = -EFAULT;
2650		goto err_req;
2651	}
2652
2653	ret = net2272_probe_fin(dev, irqflags);
2654	if (ret)
2655		goto err_io;
2656
2657	platform_set_drvdata(pdev, dev);
2658	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2659		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2660
2661	return 0;
2662
2663 err_io:
2664	iounmap(dev->base_addr);
2665 err_req:
2666	release_mem_region(base, len);
2667 err:
2668	usb_put_gadget(&dev->gadget);
2669
2670	return ret;
2671}
2672
2673static void
2674net2272_plat_remove(struct platform_device *pdev)
2675{
2676	struct net2272 *dev = platform_get_drvdata(pdev);
2677
2678	net2272_remove(dev);
2679
2680	release_mem_region(pdev->resource[0].start,
2681		resource_size(&pdev->resource[0]));
2682
2683	usb_put_gadget(&dev->gadget);
2684}
2685
2686static struct platform_driver net2272_plat_driver = {
2687	.probe   = net2272_plat_probe,
2688	.remove_new = net2272_plat_remove,
2689	.driver  = {
2690		.name  = driver_name,
2691	},
2692	/* FIXME .suspend, .resume */
2693};
2694MODULE_ALIAS("platform:net2272");
2695
2696static int __init net2272_init(void)
2697{
2698	int ret;
2699
2700	ret = net2272_pci_register();
2701	if (ret)
2702		return ret;
2703	ret = platform_driver_register(&net2272_plat_driver);
2704	if (ret)
2705		goto err_pci;
2706	return ret;
2707
2708err_pci:
2709	net2272_pci_unregister();
2710	return ret;
2711}
2712module_init(net2272_init);
2713
2714static void __exit net2272_cleanup(void)
2715{
2716	net2272_pci_unregister();
2717	platform_driver_unregister(&net2272_plat_driver);
2718}
2719module_exit(net2272_cleanup);
2720
2721MODULE_DESCRIPTION(DRIVER_DESC);
2722MODULE_AUTHOR("PLX Technology, Inc.");
2723MODULE_LICENSE("GPL");
v4.6
 
   1/*
   2 * Driver for PLX NET2272 USB device controller
   3 *
   4 * Copyright (C) 2005-2006 PLX Technology, Inc.
   5 * Copyright (C) 2006-2011 Analog Devices, Inc.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License as published by
   9 * the Free Software Foundation; either version 2 of the License, or
  10 * (at your option) any later version.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program; if not, write to the Free Software
  19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  20 */
  21
  22#include <linux/delay.h>
  23#include <linux/device.h>
  24#include <linux/errno.h>
  25#include <linux/gpio.h>
  26#include <linux/init.h>
  27#include <linux/interrupt.h>
  28#include <linux/io.h>
  29#include <linux/ioport.h>
  30#include <linux/kernel.h>
  31#include <linux/list.h>
  32#include <linux/module.h>
  33#include <linux/moduleparam.h>
  34#include <linux/pci.h>
  35#include <linux/platform_device.h>
  36#include <linux/prefetch.h>
  37#include <linux/sched.h>
  38#include <linux/slab.h>
  39#include <linux/timer.h>
  40#include <linux/usb.h>
  41#include <linux/usb/ch9.h>
  42#include <linux/usb/gadget.h>
  43
  44#include <asm/byteorder.h>
  45#include <asm/unaligned.h>
  46
  47#include "net2272.h"
  48
  49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
  50
  51static const char driver_name[] = "net2272";
  52static const char driver_vers[] = "2006 October 17/mainline";
  53static const char driver_desc[] = DRIVER_DESC;
  54
  55static const char ep0name[] = "ep0";
  56static const char * const ep_name[] = {
  57	ep0name,
  58	"ep-a", "ep-b", "ep-c",
  59};
  60
  61#ifdef CONFIG_USB_NET2272_DMA
  62/*
  63 * use_dma: the NET2272 can use an external DMA controller.
  64 * Note that since there is no generic DMA api, some functions,
  65 * notably request_dma, start_dma, and cancel_dma will need to be
  66 * modified for your platform's particular dma controller.
  67 *
  68 * If use_dma is disabled, pio will be used instead.
  69 */
  70static bool use_dma = 0;
  71module_param(use_dma, bool, 0644);
  72
  73/*
  74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
  75 * The NET2272 can only use dma for a single endpoint at a time.
  76 * At some point this could be modified to allow either endpoint
  77 * to take control of dma as it becomes available.
  78 *
  79 * Note that DMA should not be used on OUT endpoints unless it can
  80 * be guaranteed that no short packets will arrive on an IN endpoint
  81 * while the DMA operation is pending.  Otherwise the OUT DMA will
  82 * terminate prematurely (See NET2272 Errata 630-0213-0101)
  83 */
  84static ushort dma_ep = 1;
  85module_param(dma_ep, ushort, 0644);
  86
  87/*
  88 * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
  89 *	mode 0 == Slow DREQ mode
  90 *	mode 1 == Fast DREQ mode
  91 *	mode 2 == Burst mode
  92 */
  93static ushort dma_mode = 2;
  94module_param(dma_mode, ushort, 0644);
  95#else
  96#define use_dma 0
  97#define dma_ep 1
  98#define dma_mode 2
  99#endif
 100
 101/*
 102 * fifo_mode: net2272 buffer configuration:
 103 *      mode 0 == ep-{a,b,c} 512db each
 104 *      mode 1 == ep-a 1k, ep-{b,c} 512db
 105 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
 106 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
 107 */
 108static ushort fifo_mode = 0;
 109module_param(fifo_mode, ushort, 0644);
 110
 111/*
 112 * enable_suspend: When enabled, the driver will respond to
 113 * USB suspend requests by powering down the NET2272.  Otherwise,
 114 * USB suspend requests will be ignored.  This is acceptible for
 115 * self-powered devices.  For bus powered devices set this to 1.
 116 */
 117static ushort enable_suspend = 0;
 118module_param(enable_suspend, ushort, 0644);
 119
 120static void assert_out_naking(struct net2272_ep *ep, const char *where)
 121{
 122	u8 tmp;
 123
 124#ifndef DEBUG
 125	return;
 126#endif
 127
 128	tmp = net2272_ep_read(ep, EP_STAT0);
 129	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
 130		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
 131			ep->ep.name, where, tmp);
 132		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 133	}
 134}
 135#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
 136
 137static void stop_out_naking(struct net2272_ep *ep)
 138{
 139	u8 tmp = net2272_ep_read(ep, EP_STAT0);
 140
 141	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
 142		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 143}
 144
 145#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
 146
 147static char *type_string(u8 bmAttributes)
 148{
 149	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
 150	case USB_ENDPOINT_XFER_BULK: return "bulk";
 151	case USB_ENDPOINT_XFER_ISOC: return "iso";
 152	case USB_ENDPOINT_XFER_INT:  return "intr";
 153	default:                     return "control";
 154	}
 155}
 156
 157static char *buf_state_string(unsigned state)
 158{
 159	switch (state) {
 160	case BUFF_FREE:  return "free";
 161	case BUFF_VALID: return "valid";
 162	case BUFF_LCL:   return "local";
 163	case BUFF_USB:   return "usb";
 164	default:         return "unknown";
 165	}
 166}
 167
 168static char *dma_mode_string(void)
 169{
 170	if (!use_dma)
 171		return "PIO";
 172	switch (dma_mode) {
 173	case 0:  return "SLOW DREQ";
 174	case 1:  return "FAST DREQ";
 175	case 2:  return "BURST";
 176	default: return "invalid";
 177	}
 178}
 179
 180static void net2272_dequeue_all(struct net2272_ep *);
 181static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
 182static int net2272_fifo_status(struct usb_ep *);
 183
 184static struct usb_ep_ops net2272_ep_ops;
 185
 186/*---------------------------------------------------------------------------*/
 187
 188static int
 189net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 190{
 191	struct net2272 *dev;
 192	struct net2272_ep *ep;
 193	u32 max;
 194	u8 tmp;
 195	unsigned long flags;
 196
 197	ep = container_of(_ep, struct net2272_ep, ep);
 198	if (!_ep || !desc || ep->desc || _ep->name == ep0name
 199			|| desc->bDescriptorType != USB_DT_ENDPOINT)
 200		return -EINVAL;
 201	dev = ep->dev;
 202	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 203		return -ESHUTDOWN;
 204
 205	max = usb_endpoint_maxp(desc) & 0x1fff;
 206
 207	spin_lock_irqsave(&dev->lock, flags);
 208	_ep->maxpacket = max & 0x7fff;
 209	ep->desc = desc;
 210
 211	/* net2272_ep_reset() has already been called */
 212	ep->stopped = 0;
 213	ep->wedged = 0;
 214
 215	/* set speed-dependent max packet */
 216	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
 217	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
 218
 219	/* set type, direction, address; reset fifo counters */
 220	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 221	tmp = usb_endpoint_type(desc);
 222	if (usb_endpoint_xfer_bulk(desc)) {
 223		/* catch some particularly blatant driver bugs */
 224		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
 225		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
 226			spin_unlock_irqrestore(&dev->lock, flags);
 227			return -ERANGE;
 228		}
 229	}
 230	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
 231	tmp <<= ENDPOINT_TYPE;
 232	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
 233	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
 234	tmp |= (1 << ENDPOINT_ENABLE);
 235
 236	/* for OUT transfers, block the rx fifo until a read is posted */
 237	ep->is_in = usb_endpoint_dir_in(desc);
 238	if (!ep->is_in)
 239		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 240
 241	net2272_ep_write(ep, EP_CFG, tmp);
 242
 243	/* enable irqs */
 244	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
 245	net2272_write(dev, IRQENB0, tmp);
 246
 247	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
 248		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
 249		| net2272_ep_read(ep, EP_IRQENB);
 250	net2272_ep_write(ep, EP_IRQENB, tmp);
 251
 252	tmp = desc->bEndpointAddress;
 253	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
 254		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
 255		type_string(desc->bmAttributes), max,
 256		net2272_ep_read(ep, EP_CFG));
 257
 258	spin_unlock_irqrestore(&dev->lock, flags);
 259	return 0;
 260}
 261
 262static void net2272_ep_reset(struct net2272_ep *ep)
 263{
 264	u8 tmp;
 265
 266	ep->desc = NULL;
 267	INIT_LIST_HEAD(&ep->queue);
 268
 269	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
 270	ep->ep.ops = &net2272_ep_ops;
 271
 272	/* disable irqs, endpoint */
 273	net2272_ep_write(ep, EP_IRQENB, 0);
 274
 275	/* init to our chosen defaults, notably so that we NAK OUT
 276	 * packets until the driver queues a read.
 277	 */
 278	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
 279	net2272_ep_write(ep, EP_RSPSET, tmp);
 280
 281	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
 282	if (ep->num != 0)
 283		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
 284
 285	net2272_ep_write(ep, EP_RSPCLR, tmp);
 286
 287	/* scrub most status bits, and flush any fifo state */
 288	net2272_ep_write(ep, EP_STAT0,
 289			  (1 << DATA_IN_TOKEN_INTERRUPT)
 290			| (1 << DATA_OUT_TOKEN_INTERRUPT)
 291			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
 292			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
 293			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
 294
 295	net2272_ep_write(ep, EP_STAT1,
 296			    (1 << TIMEOUT)
 297			  | (1 << USB_OUT_ACK_SENT)
 298			  | (1 << USB_OUT_NAK_SENT)
 299			  | (1 << USB_IN_ACK_RCVD)
 300			  | (1 << USB_IN_NAK_SENT)
 301			  | (1 << USB_STALL_SENT)
 302			  | (1 << LOCAL_OUT_ZLP)
 303			  | (1 << BUFFER_FLUSH));
 304
 305	/* fifo size is handled seperately */
 306}
 307
 308static int net2272_disable(struct usb_ep *_ep)
 309{
 310	struct net2272_ep *ep;
 311	unsigned long flags;
 312
 313	ep = container_of(_ep, struct net2272_ep, ep);
 314	if (!_ep || !ep->desc || _ep->name == ep0name)
 315		return -EINVAL;
 316
 317	spin_lock_irqsave(&ep->dev->lock, flags);
 318	net2272_dequeue_all(ep);
 319	net2272_ep_reset(ep);
 320
 321	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
 322
 323	spin_unlock_irqrestore(&ep->dev->lock, flags);
 324	return 0;
 325}
 326
 327/*---------------------------------------------------------------------------*/
 328
 329static struct usb_request *
 330net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 331{
 332	struct net2272_ep *ep;
 333	struct net2272_request *req;
 334
 335	if (!_ep)
 336		return NULL;
 337	ep = container_of(_ep, struct net2272_ep, ep);
 338
 339	req = kzalloc(sizeof(*req), gfp_flags);
 340	if (!req)
 341		return NULL;
 342
 343	INIT_LIST_HEAD(&req->queue);
 344
 345	return &req->req;
 346}
 347
 348static void
 349net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 350{
 351	struct net2272_ep *ep;
 352	struct net2272_request *req;
 353
 354	ep = container_of(_ep, struct net2272_ep, ep);
 355	if (!_ep || !_req)
 356		return;
 357
 358	req = container_of(_req, struct net2272_request, req);
 359	WARN_ON(!list_empty(&req->queue));
 360	kfree(req);
 361}
 362
 363static void
 364net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
 365{
 366	struct net2272 *dev;
 367	unsigned stopped = ep->stopped;
 368
 369	if (ep->num == 0) {
 370		if (ep->dev->protocol_stall) {
 371			ep->stopped = 1;
 372			set_halt(ep);
 373		}
 374		allow_status(ep);
 375	}
 376
 377	list_del_init(&req->queue);
 378
 379	if (req->req.status == -EINPROGRESS)
 380		req->req.status = status;
 381	else
 382		status = req->req.status;
 383
 384	dev = ep->dev;
 385	if (use_dma && ep->dma)
 386		usb_gadget_unmap_request(&dev->gadget, &req->req,
 387				ep->is_in);
 388
 389	if (status && status != -ESHUTDOWN)
 390		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
 391			ep->ep.name, &req->req, status,
 392			req->req.actual, req->req.length, req->req.buf);
 393
 394	/* don't modify queue heads during completion callback */
 395	ep->stopped = 1;
 396	spin_unlock(&dev->lock);
 397	usb_gadget_giveback_request(&ep->ep, &req->req);
 398	spin_lock(&dev->lock);
 399	ep->stopped = stopped;
 400}
 401
 402static int
 403net2272_write_packet(struct net2272_ep *ep, u8 *buf,
 404	struct net2272_request *req, unsigned max)
 405{
 406	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 407	u16 *bufp;
 408	unsigned length, count;
 409	u8 tmp;
 410
 411	length = min(req->req.length - req->req.actual, max);
 412	req->req.actual += length;
 413
 414	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
 415		ep->ep.name, req, max, length,
 416		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 417
 418	count = length;
 419	bufp = (u16 *)buf;
 420
 421	while (likely(count >= 2)) {
 422		/* no byte-swap required; chip endian set during init */
 423		writew(*bufp++, ep_data);
 424		count -= 2;
 425	}
 426	buf = (u8 *)bufp;
 427
 428	/* write final byte by placing the NET2272 into 8-bit mode */
 429	if (unlikely(count)) {
 430		tmp = net2272_read(ep->dev, LOCCTL);
 431		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
 432		writeb(*buf, ep_data);
 433		net2272_write(ep->dev, LOCCTL, tmp);
 434	}
 435	return length;
 436}
 437
 438/* returns: 0: still running, 1: completed, negative: errno */
 439static int
 440net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
 441{
 442	u8 *buf;
 443	unsigned count, max;
 444	int status;
 445
 446	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
 447		ep->ep.name, req->req.actual, req->req.length);
 448
 449	/*
 450	 * Keep loading the endpoint until the final packet is loaded,
 451	 * or the endpoint buffer is full.
 452	 */
 453 top:
 454	/*
 455	 * Clear interrupt status
 456	 *  - Packet Transmitted interrupt will become set again when the
 457	 *    host successfully takes another packet
 458	 */
 459	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 460	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
 461		buf = req->req.buf + req->req.actual;
 462		prefetch(buf);
 463
 464		/* force pagesel */
 465		net2272_ep_read(ep, EP_STAT0);
 466
 467		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
 468			(net2272_ep_read(ep, EP_AVAIL0));
 469
 470		if (max < ep->ep.maxpacket)
 471			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 472				| (net2272_ep_read(ep, EP_AVAIL0));
 473
 474		count = net2272_write_packet(ep, buf, req, max);
 475		/* see if we are done */
 476		if (req->req.length == req->req.actual) {
 477			/* validate short or zlp packet */
 478			if (count < ep->ep.maxpacket)
 479				set_fifo_bytecount(ep, 0);
 480			net2272_done(ep, req, 0);
 481
 482			if (!list_empty(&ep->queue)) {
 483				req = list_entry(ep->queue.next,
 484						struct net2272_request,
 485						queue);
 486				status = net2272_kick_dma(ep, req);
 487
 488				if (status < 0)
 489					if ((net2272_ep_read(ep, EP_STAT0)
 490							& (1 << BUFFER_EMPTY)))
 491						goto top;
 492			}
 493			return 1;
 494		}
 495		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
 496	}
 497	return 0;
 498}
 499
 500static void
 501net2272_out_flush(struct net2272_ep *ep)
 502{
 503	ASSERT_OUT_NAKING(ep);
 504
 505	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
 506			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
 507	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
 508}
 509
 510static int
 511net2272_read_packet(struct net2272_ep *ep, u8 *buf,
 512	struct net2272_request *req, unsigned avail)
 513{
 514	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
 515	unsigned is_short;
 516	u16 *bufp;
 517
 518	req->req.actual += avail;
 519
 520	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
 521		ep->ep.name, req, avail,
 522		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
 523
 524	is_short = (avail < ep->ep.maxpacket);
 525
 526	if (unlikely(avail == 0)) {
 527		/* remove any zlp from the buffer */
 528		(void)readw(ep_data);
 529		return is_short;
 530	}
 531
 532	/* Ensure we get the final byte */
 533	if (unlikely(avail % 2))
 534		avail++;
 535	bufp = (u16 *)buf;
 536
 537	do {
 538		*bufp++ = readw(ep_data);
 539		avail -= 2;
 540	} while (avail);
 541
 542	/*
 543	 * To avoid false endpoint available race condition must read
 544	 * ep stat0 twice in the case of a short transfer
 545	 */
 546	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
 547		net2272_ep_read(ep, EP_STAT0);
 548
 549	return is_short;
 550}
 551
 552static int
 553net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
 554{
 555	u8 *buf;
 556	unsigned is_short;
 557	int count;
 558	int tmp;
 559	int cleanup = 0;
 560	int status = -1;
 561
 562	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
 563		ep->ep.name, req->req.actual, req->req.length);
 564
 565 top:
 566	do {
 567		buf = req->req.buf + req->req.actual;
 568		prefetchw(buf);
 569
 570		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
 571			| net2272_ep_read(ep, EP_AVAIL0);
 572
 573		net2272_ep_write(ep, EP_STAT0,
 574			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
 575			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
 576
 577		tmp = req->req.length - req->req.actual;
 578
 579		if (count > tmp) {
 580			if ((tmp % ep->ep.maxpacket) != 0) {
 581				dev_err(ep->dev->dev,
 582					"%s out fifo %d bytes, expected %d\n",
 583					ep->ep.name, count, tmp);
 584				cleanup = 1;
 585			}
 586			count = (tmp > 0) ? tmp : 0;
 587		}
 588
 589		is_short = net2272_read_packet(ep, buf, req, count);
 590
 591		/* completion */
 592		if (unlikely(cleanup || is_short ||
 593				((req->req.actual == req->req.length)
 594				 && !req->req.zero))) {
 595
 596			if (cleanup) {
 597				net2272_out_flush(ep);
 598				net2272_done(ep, req, -EOVERFLOW);
 599			} else
 600				net2272_done(ep, req, 0);
 601
 602			/* re-initialize endpoint transfer registers
 603			 * otherwise they may result in erroneous pre-validation
 604			 * for subsequent control reads
 605			 */
 606			if (unlikely(ep->num == 0)) {
 607				net2272_ep_write(ep, EP_TRANSFER2, 0);
 608				net2272_ep_write(ep, EP_TRANSFER1, 0);
 609				net2272_ep_write(ep, EP_TRANSFER0, 0);
 610			}
 611
 612			if (!list_empty(&ep->queue)) {
 
 
 613				req = list_entry(ep->queue.next,
 614					struct net2272_request, queue);
 615				status = net2272_kick_dma(ep, req);
 616				if ((status < 0) &&
 617				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
 618					goto top;
 619			}
 620			return 1;
 621		}
 622	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
 623
 624	return 0;
 625}
 626
 627static void
 628net2272_pio_advance(struct net2272_ep *ep)
 629{
 630	struct net2272_request *req;
 631
 632	if (unlikely(list_empty(&ep->queue)))
 633		return;
 634
 635	req = list_entry(ep->queue.next, struct net2272_request, queue);
 636	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
 637}
 638
 639/* returns 0 on success, else negative errno */
 640static int
 641net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
 642	unsigned len, unsigned dir)
 643{
 644	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
 645		ep, buf, len, dir);
 646
 647	/* The NET2272 only supports a single dma channel */
 648	if (dev->dma_busy)
 649		return -EBUSY;
 650	/*
 651	 * EP_TRANSFER (used to determine the number of bytes received
 652	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
 653	 */
 654	if ((dir == 1) && (len > 0x1000000))
 655		return -EINVAL;
 656
 657	dev->dma_busy = 1;
 658
 659	/* initialize platform's dma */
 660#ifdef CONFIG_PCI
 661	/* NET2272 addr, buffer addr, length, etc. */
 662	switch (dev->dev_id) {
 663	case PCI_DEVICE_ID_RDK1:
 664		/* Setup PLX 9054 DMA mode */
 665		writel((1 << LOCAL_BUS_WIDTH) |
 666			(1 << TA_READY_INPUT_ENABLE) |
 667			(0 << LOCAL_BURST_ENABLE) |
 668			(1 << DONE_INTERRUPT_ENABLE) |
 669			(1 << LOCAL_ADDRESSING_MODE) |
 670			(1 << DEMAND_MODE) |
 671			(1 << DMA_EOT_ENABLE) |
 672			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
 673			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
 674			dev->rdk1.plx9054_base_addr + DMAMODE0);
 675
 676		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
 677		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
 678		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
 679		writel((dir << DIRECTION_OF_TRANSFER) |
 680			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
 681			dev->rdk1.plx9054_base_addr + DMADPR0);
 682		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
 683			readl(dev->rdk1.plx9054_base_addr + INTCSR),
 684			dev->rdk1.plx9054_base_addr + INTCSR);
 685
 686		break;
 687	}
 688#endif
 689
 690	net2272_write(dev, DMAREQ,
 691		(0 << DMA_BUFFER_VALID) |
 692		(1 << DMA_REQUEST_ENABLE) |
 693		(1 << DMA_CONTROL_DACK) |
 694		(dev->dma_eot_polarity << EOT_POLARITY) |
 695		(dev->dma_dack_polarity << DACK_POLARITY) |
 696		(dev->dma_dreq_polarity << DREQ_POLARITY) |
 697		((ep >> 1) << DMA_ENDPOINT_SELECT));
 698
 699	(void) net2272_read(dev, SCRATCH);
 700
 701	return 0;
 702}
 703
 704static void
 705net2272_start_dma(struct net2272 *dev)
 706{
 707	/* start platform's dma controller */
 708#ifdef CONFIG_PCI
 709	switch (dev->dev_id) {
 710	case PCI_DEVICE_ID_RDK1:
 711		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
 712			dev->rdk1.plx9054_base_addr + DMACSR0);
 713		break;
 714	}
 715#endif
 716}
 717
 718/* returns 0 on success, else negative errno */
 719static int
 720net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
 721{
 722	unsigned size;
 723	u8 tmp;
 724
 725	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
 726		return -EINVAL;
 727
 728	/* don't use dma for odd-length transfers
 729	 * otherwise, we'd need to deal with the last byte with pio
 730	 */
 731	if (req->req.length & 1)
 732		return -EINVAL;
 733
 734	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
 735		ep->ep.name, req, (unsigned long long) req->req.dma);
 736
 737	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
 738
 739	/* The NET2272 can only use DMA on one endpoint at a time */
 740	if (ep->dev->dma_busy)
 741		return -EBUSY;
 742
 743	/* Make sure we only DMA an even number of bytes (we'll use
 744	 * pio to complete the transfer)
 745	 */
 746	size = req->req.length;
 747	size &= ~1;
 748
 749	/* device-to-host transfer */
 750	if (ep->is_in) {
 751		/* initialize platform's dma controller */
 752		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
 753			/* unable to obtain DMA channel; return error and use pio mode */
 754			return -EBUSY;
 755		req->req.actual += size;
 756
 757	/* host-to-device transfer */
 758	} else {
 759		tmp = net2272_ep_read(ep, EP_STAT0);
 760
 761		/* initialize platform's dma controller */
 762		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
 763			/* unable to obtain DMA channel; return error and use pio mode */
 764			return -EBUSY;
 765
 766		if (!(tmp & (1 << BUFFER_EMPTY)))
 767			ep->not_empty = 1;
 768		else
 769			ep->not_empty = 0;
 770
 771
 772		/* allow the endpoint's buffer to fill */
 773		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 774
 775		/* this transfer completed and data's already in the fifo
 776		 * return error so pio gets used.
 777		 */
 778		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
 779
 780			/* deassert dreq */
 781			net2272_write(ep->dev, DMAREQ,
 782				(0 << DMA_BUFFER_VALID) |
 783				(0 << DMA_REQUEST_ENABLE) |
 784				(1 << DMA_CONTROL_DACK) |
 785				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
 786				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
 787				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
 788				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
 789
 790			return -EBUSY;
 791		}
 792	}
 793
 794	/* Don't use per-packet interrupts: use dma interrupts only */
 795	net2272_ep_write(ep, EP_IRQENB, 0);
 796
 797	net2272_start_dma(ep->dev);
 798
 799	return 0;
 800}
 801
 802static void net2272_cancel_dma(struct net2272 *dev)
 803{
 804#ifdef CONFIG_PCI
 805	switch (dev->dev_id) {
 806	case PCI_DEVICE_ID_RDK1:
 807		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
 808		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
 809		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
 810		         (1 << CHANNEL_DONE)))
 811			continue;	/* wait for dma to stabalize */
 812
 813		/* dma abort generates an interrupt */
 814		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
 815			dev->rdk1.plx9054_base_addr + DMACSR0);
 816		break;
 817	}
 818#endif
 819
 820	dev->dma_busy = 0;
 821}
 822
 823/*---------------------------------------------------------------------------*/
 824
 825static int
 826net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 827{
 828	struct net2272_request *req;
 829	struct net2272_ep *ep;
 830	struct net2272 *dev;
 831	unsigned long flags;
 832	int status = -1;
 833	u8 s;
 834
 835	req = container_of(_req, struct net2272_request, req);
 836	if (!_req || !_req->complete || !_req->buf
 837			|| !list_empty(&req->queue))
 838		return -EINVAL;
 839	ep = container_of(_ep, struct net2272_ep, ep);
 840	if (!_ep || (!ep->desc && ep->num != 0))
 841		return -EINVAL;
 842	dev = ep->dev;
 843	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
 844		return -ESHUTDOWN;
 845
 846	/* set up dma mapping in case the caller didn't */
 847	if (use_dma && ep->dma) {
 848		status = usb_gadget_map_request(&dev->gadget, _req,
 849				ep->is_in);
 850		if (status)
 851			return status;
 852	}
 853
 854	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
 855		_ep->name, _req, _req->length, _req->buf,
 856		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
 857
 858	spin_lock_irqsave(&dev->lock, flags);
 859
 860	_req->status = -EINPROGRESS;
 861	_req->actual = 0;
 862
 863	/* kickstart this i/o queue? */
 864	if (list_empty(&ep->queue) && !ep->stopped) {
 865		/* maybe there's no control data, just status ack */
 866		if (ep->num == 0 && _req->length == 0) {
 867			net2272_done(ep, req, 0);
 868			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
 869			goto done;
 870		}
 871
 872		/* Return zlp, don't let it block subsequent packets */
 873		s = net2272_ep_read(ep, EP_STAT0);
 874		if (s & (1 << BUFFER_EMPTY)) {
 875			/* Buffer is empty check for a blocking zlp, handle it */
 876			if ((s & (1 << NAK_OUT_PACKETS)) &&
 877			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
 878				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
 879				/*
 880				 * Request is going to terminate with a short packet ...
 881				 * hope the client is ready for it!
 882				 */
 883				status = net2272_read_fifo(ep, req);
 884				/* clear short packet naking */
 885				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
 886				goto done;
 887			}
 888		}
 889
 890		/* try dma first */
 891		status = net2272_kick_dma(ep, req);
 892
 893		if (status < 0) {
 894			/* dma failed (most likely in use by another endpoint)
 895			 * fallback to pio
 896			 */
 897			status = 0;
 898
 899			if (ep->is_in)
 900				status = net2272_write_fifo(ep, req);
 901			else {
 902				s = net2272_ep_read(ep, EP_STAT0);
 903				if ((s & (1 << BUFFER_EMPTY)) == 0)
 904					status = net2272_read_fifo(ep, req);
 905			}
 906
 907			if (unlikely(status != 0)) {
 908				if (status > 0)
 909					status = 0;
 910				req = NULL;
 911			}
 912		}
 913	}
 914	if (likely(req))
 915		list_add_tail(&req->queue, &ep->queue);
 916
 917	if (likely(!list_empty(&ep->queue)))
 918		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
 919 done:
 920	spin_unlock_irqrestore(&dev->lock, flags);
 921
 922	return 0;
 923}
 924
 925/* dequeue ALL requests */
 926static void
 927net2272_dequeue_all(struct net2272_ep *ep)
 928{
 929	struct net2272_request *req;
 930
 931	/* called with spinlock held */
 932	ep->stopped = 1;
 933
 934	while (!list_empty(&ep->queue)) {
 935		req = list_entry(ep->queue.next,
 936				struct net2272_request,
 937				queue);
 938		net2272_done(ep, req, -ESHUTDOWN);
 939	}
 940}
 941
 942/* dequeue JUST ONE request */
 943static int
 944net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 945{
 946	struct net2272_ep *ep;
 947	struct net2272_request *req;
 948	unsigned long flags;
 949	int stopped;
 950
 951	ep = container_of(_ep, struct net2272_ep, ep);
 952	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
 953		return -EINVAL;
 954
 955	spin_lock_irqsave(&ep->dev->lock, flags);
 956	stopped = ep->stopped;
 957	ep->stopped = 1;
 958
 959	/* make sure it's still queued on this endpoint */
 960	list_for_each_entry(req, &ep->queue, queue) {
 961		if (&req->req == _req)
 962			break;
 
 
 963	}
 964	if (&req->req != _req) {
 
 965		spin_unlock_irqrestore(&ep->dev->lock, flags);
 966		return -EINVAL;
 967	}
 968
 969	/* queue head may be partially complete */
 970	if (ep->queue.next == &req->queue) {
 971		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
 972		net2272_done(ep, req, -ECONNRESET);
 973	}
 974	req = NULL;
 975	ep->stopped = stopped;
 976
 977	spin_unlock_irqrestore(&ep->dev->lock, flags);
 978	return 0;
 979}
 980
 981/*---------------------------------------------------------------------------*/
 982
 983static int
 984net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
 985{
 986	struct net2272_ep *ep;
 987	unsigned long flags;
 988	int ret = 0;
 989
 990	ep = container_of(_ep, struct net2272_ep, ep);
 991	if (!_ep || (!ep->desc && ep->num != 0))
 992		return -EINVAL;
 993	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
 994		return -ESHUTDOWN;
 995	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
 996		return -EINVAL;
 997
 998	spin_lock_irqsave(&ep->dev->lock, flags);
 999	if (!list_empty(&ep->queue))
1000		ret = -EAGAIN;
1001	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1002		ret = -EAGAIN;
1003	else {
1004		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1005			value ? "set" : "clear",
1006			wedged ? "wedge" : "halt");
1007		/* set/clear */
1008		if (value) {
1009			if (ep->num == 0)
1010				ep->dev->protocol_stall = 1;
1011			else
1012				set_halt(ep);
1013			if (wedged)
1014				ep->wedged = 1;
1015		} else {
1016			clear_halt(ep);
1017			ep->wedged = 0;
1018		}
1019	}
1020	spin_unlock_irqrestore(&ep->dev->lock, flags);
1021
1022	return ret;
1023}
1024
1025static int
1026net2272_set_halt(struct usb_ep *_ep, int value)
1027{
1028	return net2272_set_halt_and_wedge(_ep, value, 0);
1029}
1030
1031static int
1032net2272_set_wedge(struct usb_ep *_ep)
1033{
1034	if (!_ep || _ep->name == ep0name)
1035		return -EINVAL;
1036	return net2272_set_halt_and_wedge(_ep, 1, 1);
1037}
1038
1039static int
1040net2272_fifo_status(struct usb_ep *_ep)
1041{
1042	struct net2272_ep *ep;
1043	u16 avail;
1044
1045	ep = container_of(_ep, struct net2272_ep, ep);
1046	if (!_ep || (!ep->desc && ep->num != 0))
1047		return -ENODEV;
1048	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1049		return -ESHUTDOWN;
1050
1051	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1052	avail |= net2272_ep_read(ep, EP_AVAIL0);
1053	if (avail > ep->fifo_size)
1054		return -EOVERFLOW;
1055	if (ep->is_in)
1056		avail = ep->fifo_size - avail;
1057	return avail;
1058}
1059
1060static void
1061net2272_fifo_flush(struct usb_ep *_ep)
1062{
1063	struct net2272_ep *ep;
1064
1065	ep = container_of(_ep, struct net2272_ep, ep);
1066	if (!_ep || (!ep->desc && ep->num != 0))
1067		return;
1068	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1069		return;
1070
1071	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1072}
1073
1074static struct usb_ep_ops net2272_ep_ops = {
1075	.enable        = net2272_enable,
1076	.disable       = net2272_disable,
1077
1078	.alloc_request = net2272_alloc_request,
1079	.free_request  = net2272_free_request,
1080
1081	.queue         = net2272_queue,
1082	.dequeue       = net2272_dequeue,
1083
1084	.set_halt      = net2272_set_halt,
1085	.set_wedge     = net2272_set_wedge,
1086	.fifo_status   = net2272_fifo_status,
1087	.fifo_flush    = net2272_fifo_flush,
1088};
1089
1090/*---------------------------------------------------------------------------*/
1091
1092static int
1093net2272_get_frame(struct usb_gadget *_gadget)
1094{
1095	struct net2272 *dev;
1096	unsigned long flags;
1097	u16 ret;
1098
1099	if (!_gadget)
1100		return -ENODEV;
1101	dev = container_of(_gadget, struct net2272, gadget);
1102	spin_lock_irqsave(&dev->lock, flags);
1103
1104	ret = net2272_read(dev, FRAME1) << 8;
1105	ret |= net2272_read(dev, FRAME0);
1106
1107	spin_unlock_irqrestore(&dev->lock, flags);
1108	return ret;
1109}
1110
1111static int
1112net2272_wakeup(struct usb_gadget *_gadget)
1113{
1114	struct net2272 *dev;
1115	u8 tmp;
1116	unsigned long flags;
1117
1118	if (!_gadget)
1119		return 0;
1120	dev = container_of(_gadget, struct net2272, gadget);
1121
1122	spin_lock_irqsave(&dev->lock, flags);
1123	tmp = net2272_read(dev, USBCTL0);
1124	if (tmp & (1 << IO_WAKEUP_ENABLE))
1125		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1126
1127	spin_unlock_irqrestore(&dev->lock, flags);
1128
1129	return 0;
1130}
1131
1132static int
1133net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1134{
1135	if (!_gadget)
1136		return -ENODEV;
1137
1138	_gadget->is_selfpowered = (value != 0);
1139
1140	return 0;
1141}
1142
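/*
 * Soft-connect control: USB_DETECT_ENABLE appears to gate the D+ pull-up,
 * so clearing it makes the device drop off the bus from the host's point of
 * view without disturbing any other controller state.
 */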
1143static int
1144net2272_pullup(struct usb_gadget *_gadget, int is_on)
1145{
1146	struct net2272 *dev;
1147	u8 tmp;
1148	unsigned long flags;
1149
1150	if (!_gadget)
1151		return -ENODEV;
1152	dev = container_of(_gadget, struct net2272, gadget);
1153
1154	spin_lock_irqsave(&dev->lock, flags);
1155	tmp = net2272_read(dev, USBCTL0);
1156	dev->softconnect = (is_on != 0);
1157	if (is_on)
1158		tmp |= (1 << USB_DETECT_ENABLE);
1159	else
1160		tmp &= ~(1 << USB_DETECT_ENABLE);
1161	net2272_write(dev, USBCTL0, tmp);
1162	spin_unlock_irqrestore(&dev->lock, flags);
1163
1164	return 0;
1165}
1166
1167static int net2272_start(struct usb_gadget *_gadget,
1168		struct usb_gadget_driver *driver);
1169static int net2272_stop(struct usb_gadget *_gadget);
1170
1171static const struct usb_gadget_ops net2272_ops = {
1172	.get_frame	= net2272_get_frame,
1173	.wakeup		= net2272_wakeup,
1174	.set_selfpowered = net2272_set_selfpowered,
1175	.pullup		= net2272_pullup,
1176	.udc_start	= net2272_start,
1177	.udc_stop	= net2272_stop,
1178};
1179
1180/*---------------------------------------------------------------------------*/
1181
1182static ssize_t
1183registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1184{
1185	struct net2272 *dev;
1186	char *next;
1187	unsigned size, t;
1188	unsigned long flags;
1189	u8 t1, t2;
1190	int i;
1191	const char *s;
1192
1193	dev = dev_get_drvdata(_dev);
1194	next = buf;
1195	size = PAGE_SIZE;
1196	spin_lock_irqsave(&dev->lock, flags);
1197
1198	if (dev->driver)
1199		s = dev->driver->driver.name;
1200	else
1201		s = "(none)";
1202
1203	/* Main Control Registers */
1204	t = scnprintf(next, size, "%s version %s, "
1205		"chiprev %02x, locctl %02x\n"
1206		"irqenb0 %02x irqenb1 %02x "
1207		"irqstat0 %02x irqstat1 %02x\n",
1208		driver_name, driver_vers, dev->chiprev,
1209		net2272_read(dev, LOCCTL),
1210		net2272_read(dev, IRQENB0),
1211		net2272_read(dev, IRQENB1),
1212		net2272_read(dev, IRQSTAT0),
1213		net2272_read(dev, IRQSTAT1));
1214	size -= t;
1215	next += t;
1216
1217	/* DMA */
1218	t1 = net2272_read(dev, DMAREQ);
1219	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1220		t1, ep_name[(t1 & 0x01) + 1],
1221		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1222		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1223		t1 & (1 << DMA_REQUEST) ? "req " : "",
1224		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1225	size -= t;
1226	next += t;
1227
1228	/* USB Control Registers */
1229	t1 = net2272_read(dev, USBCTL1);
1230	if (t1 & (1 << VBUS_PIN)) {
1231		if (t1 & (1 << USB_HIGH_SPEED))
1232			s = "high speed";
1233		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1234			s = "powered";
1235		else
1236			s = "full speed";
1237	} else
1238		s = "not attached";
1239	t = scnprintf(next, size,
1240		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1241		net2272_read(dev, USBCTL0), t1,
1242		net2272_read(dev, OURADDR), s);
1243	size -= t;
1244	next += t;
1245
1246	/* Endpoint Registers */
1247	for (i = 0; i < 4; ++i) {
1248		struct net2272_ep *ep;
1249
1250		ep = &dev->ep[i];
1251		if (i && !ep->desc)
1252			continue;
1253
1254		t1 = net2272_ep_read(ep, EP_CFG);
1255		t2 = net2272_ep_read(ep, EP_RSPSET);
1256		t = scnprintf(next, size,
1257			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1258			"irqenb %02x\n",
1259			ep->ep.name, t1, t2,
1260			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1261			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1262			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1263			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1264			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1265			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1266			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1267			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1268			net2272_ep_read(ep, EP_IRQENB));
1269		size -= t;
1270		next += t;
1271
1272		t = scnprintf(next, size,
1273			"\tstat0 %02x stat1 %02x avail %04x "
1274			"(ep%d%s-%s)%s\n",
1275			net2272_ep_read(ep, EP_STAT0),
1276			net2272_ep_read(ep, EP_STAT1),
1277			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1278			t1 & 0x0f,
1279			ep->is_in ? "in" : "out",
1280			type_string(t1 >> 5),
1281			ep->stopped ? "*" : "");
1282		size -= t;
1283		next += t;
1284
1285		t = scnprintf(next, size,
1286			"\tep_transfer %06x\n",
1287			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1288			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1289			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1290		size -= t;
1291		next += t;
1292
1293		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1294		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1295		t = scnprintf(next, size,
1296			"\tbuf-a %s buf-b %s\n",
1297			buf_state_string(t1),
1298			buf_state_string(t2));
1299		size -= t;
1300		next += t;
1301	}
1302
1303	spin_unlock_irqrestore(&dev->lock, flags);
1304
1305	return PAGE_SIZE - size;
1306}
1307static DEVICE_ATTR_RO(registers);
1308
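/*
 * The dump above is exposed as a read-only sysfs attribute named
 * "registers"; e.g. "cat /sys/devices/.../registers" (path abbreviated,
 * for illustration) prints one snapshot per read.
 */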
1309/*---------------------------------------------------------------------------*/
1310
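/*
 * LOCCTL bits 7:6 select the on-chip FIFO configuration.  The gadget's
 * ep_list is rebuilt so that only the endpoints usable in the chosen mode
 * are advertised; in mode 3, ep-b is left off the list entirely.
 */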
1311static void
1312net2272_set_fifo_mode(struct net2272 *dev, int mode)
1313{
1314	u8 tmp;
1315
1316	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1317	tmp |= (mode << 6);
1318	net2272_write(dev, LOCCTL, tmp);
1319
1320	INIT_LIST_HEAD(&dev->gadget.ep_list);
1321
1322	/* always ep-a, ep-c ... maybe not ep-b */
1323	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1324
1325	switch (mode) {
1326	case 0:
1327		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1328		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1329		break;
1330	case 1:
1331		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1332		dev->ep[1].fifo_size = 1024;
1333		dev->ep[2].fifo_size = 512;
1334		break;
1335	case 2:
1336		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1337		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1338		break;
1339	case 3:
1340		dev->ep[1].fifo_size = 1024;
1341		break;
1342	}
1343
1344	/* ep-c is always 2 512 byte buffers */
1345	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1346	dev->ep[3].fifo_size = 512;
1347}
1348
1349/*---------------------------------------------------------------------------*/
1350
1351static void
1352net2272_usb_reset(struct net2272 *dev)
1353{
1354	dev->gadget.speed = USB_SPEED_UNKNOWN;
1355
1356	net2272_cancel_dma(dev);
1357
1358	net2272_write(dev, IRQENB0, 0);
1359	net2272_write(dev, IRQENB1, 0);
1360
1361	/* clear irq state */
1362	net2272_write(dev, IRQSTAT0, 0xff);
1363	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1364
1365	net2272_write(dev, DMAREQ,
1366		(0 << DMA_BUFFER_VALID) |
1367		(0 << DMA_REQUEST_ENABLE) |
1368		(1 << DMA_CONTROL_DACK) |
1369		(dev->dma_eot_polarity << EOT_POLARITY) |
1370		(dev->dma_dack_polarity << DACK_POLARITY) |
1371		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1372		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1373
1374	net2272_cancel_dma(dev);
1375	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1376
1377	/* Set the NET2272 ep fifo data width to 16-bit mode.
1378	 * Note that the higher-level gadget drivers are expected to convert data to little endian;
1379	 * enable byte swapping for your local bus/cpu, if needed, by setting BYTE_SWAP in LOCCTL here.
1380	 */
1381	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1382	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1383}
1384
1385static void
1386net2272_usb_reinit(struct net2272 *dev)
1387{
1388	int i;
1389
1390	/* basic endpoint init */
1391	for (i = 0; i < 4; ++i) {
1392		struct net2272_ep *ep = &dev->ep[i];
1393
1394		ep->ep.name = ep_name[i];
1395		ep->dev = dev;
1396		ep->num = i;
1397		ep->not_empty = 0;
1398
1399		if (use_dma && ep->num == dma_ep)
1400			ep->dma = 1;
1401
1402		if (i > 0 && i <= 3)
1403			ep->fifo_size = 512;
1404		else
1405			ep->fifo_size = 64;
1406		net2272_ep_reset(ep);
1407
1408		if (i == 0) {
1409			ep->ep.caps.type_control = true;
1410		} else {
1411			ep->ep.caps.type_iso = true;
1412			ep->ep.caps.type_bulk = true;
1413			ep->ep.caps.type_int = true;
1414		}
1415
1416		ep->ep.caps.dir_in = true;
1417		ep->ep.caps.dir_out = true;
1418	}
1419	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1420
1421	dev->gadget.ep0 = &dev->ep[0].ep;
1422	dev->ep[0].stopped = 0;
1423	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1424}
1425
1426static void
1427net2272_ep0_start(struct net2272 *dev)
1428{
1429	struct net2272_ep *ep0 = &dev->ep[0];
1430
1431	net2272_ep_write(ep0, EP_RSPSET,
1432		(1 << NAK_OUT_PACKETS_MODE) |
1433		(1 << ALT_NAK_OUT_PACKETS));
1434	net2272_ep_write(ep0, EP_RSPCLR,
1435		(1 << HIDE_STATUS_PHASE) |
1436		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1437	net2272_write(dev, USBCTL0,
1438		(dev->softconnect << USB_DETECT_ENABLE) |
1439		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1440		(1 << IO_WAKEUP_ENABLE));
1441	net2272_write(dev, IRQENB0,
1442		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1443		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1444		(1 << DMA_DONE_INTERRUPT_ENABLE));
1445	net2272_write(dev, IRQENB1,
1446		(1 << VBUS_INTERRUPT_ENABLE) |
1447		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1448		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1449}
1450
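/*
 * The UDC core calls ->udc_start() when a gadget driver binds to this
 * controller and ->udc_stop() when it unbinds; the controller is only made
 * visible on the bus (USB_DETECT_ENABLE) once ep0 has been set up in
 * net2272_ep0_start().
 */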
1451/* when a driver is successfully registered, it will receive
1452 * control requests including set_configuration(), which enables
1453 * non-control requests.  then usb traffic follows until a
1454 * disconnect is reported.  then a host may connect again, or
1455 * the driver might get unbound.
1456 */
1457static int net2272_start(struct usb_gadget *_gadget,
1458		struct usb_gadget_driver *driver)
1459{
1460	struct net2272 *dev;
1461	unsigned i;
1462
1463	if (!driver || !driver->setup ||
1464	    driver->max_speed != USB_SPEED_HIGH)
1465		return -EINVAL;
1466
1467	dev = container_of(_gadget, struct net2272, gadget);
1468
1469	for (i = 0; i < 4; ++i)
1470		dev->ep[i].irqs = 0;
1471	/* hook up the driver ... */
1472	dev->softconnect = 1;
1473	driver->driver.bus = NULL;
1474	dev->driver = driver;
1475
1476	/* ... then enable host detection and ep0; and we're ready
1477	 * for set_configuration as well as eventual disconnect.
1478	 */
1479	net2272_ep0_start(dev);
1480
1481	return 0;
1482}
1483
1484static void
1485stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1486{
1487	int i;
1488
1489	/* don't disconnect if it's not connected */
1490	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1491		driver = NULL;
1492
1493	/* stop hardware; prevent new request submissions;
1494	 * and kill any outstanding requests.
1495	 */
1496	net2272_usb_reset(dev);
1497	for (i = 0; i < 4; ++i)
1498		net2272_dequeue_all(&dev->ep[i]);
1499
1500	/* report disconnect; the driver is already quiesced */
1501	if (driver) {
1502		spin_unlock(&dev->lock);
1503		driver->disconnect(&dev->gadget);
1504		spin_lock(&dev->lock);
1505	}
1506
1507	net2272_usb_reinit(dev);
1508}
1509
1510static int net2272_stop(struct usb_gadget *_gadget)
1511{
1512	struct net2272 *dev;
1513	unsigned long flags;
1514
1515	dev = container_of(_gadget, struct net2272, gadget);
1516
1517	spin_lock_irqsave(&dev->lock, flags);
1518	stop_activity(dev, NULL);
1519	spin_unlock_irqrestore(&dev->lock, flags);
1520
1521	dev->driver = NULL;
1522
1523	return 0;
1524}
1525
1526/*---------------------------------------------------------------------------*/
1527/* handle ep-a/ep-b dma completions */
1528static void
1529net2272_handle_dma(struct net2272_ep *ep)
1530{
1531	struct net2272_request *req;
1532	unsigned len;
1533	int status;
1534
1535	if (!list_empty(&ep->queue))
1536		req = list_entry(ep->queue.next,
1537				struct net2272_request, queue);
1538	else
1539		req = NULL;
1540
1541	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1542
1543	/* Ensure DREQ is de-asserted */
1544	net2272_write(ep->dev, DMAREQ,
1545		(0 << DMA_BUFFER_VALID)
1546	      | (0 << DMA_REQUEST_ENABLE)
1547	      | (1 << DMA_CONTROL_DACK)
1548	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1549	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1550	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1551	      | (ep->dma << DMA_ENDPOINT_SELECT));
1552
1553	ep->dev->dma_busy = 0;
1554
1555	net2272_ep_write(ep, EP_IRQENB,
1556		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1557		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1558		| net2272_ep_read(ep, EP_IRQENB));
1559
1560	/* device-to-host transfer completed */
1561	if (ep->is_in) {
1562		/* validate a short packet or zlp if necessary */
1563		if ((req->req.length % ep->ep.maxpacket != 0) ||
1564				req->req.zero)
1565			set_fifo_bytecount(ep, 0);
1566
1567		net2272_done(ep, req, 0);
1568		if (!list_empty(&ep->queue)) {
1569			req = list_entry(ep->queue.next,
1570					struct net2272_request, queue);
1571			status = net2272_kick_dma(ep, req);
1572			if (status < 0)
1573				net2272_pio_advance(ep);
1574		}
1575
1576	/* host-to-device transfer completed */
1577	} else {
1578		/* terminated with a short packet? */
1579		if (net2272_read(ep->dev, IRQSTAT0) &
1580				(1 << DMA_DONE_INTERRUPT)) {
1581			/* abort system dma */
1582			net2272_cancel_dma(ep->dev);
1583		}
1584
1585		/* EP_TRANSFER will contain the number of bytes
1586		 * actually received.
1587		 * NOTE: There is no overflow detection on EP_TRANSFER:
1588		 * We can't deal with transfers larger than 2^24 bytes!
1589		 */
1590		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1591			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1592			| (net2272_ep_read(ep, EP_TRANSFER0));
1593
1594		if (ep->not_empty)
1595			len += 4;
1596
1597		req->req.actual += len;
1598
1599		/* get any remaining data */
1600		net2272_pio_advance(ep);
1601	}
1602}
1603
1604/*---------------------------------------------------------------------------*/
1605
1606static void
1607net2272_handle_ep(struct net2272_ep *ep)
1608{
1609	struct net2272_request *req;
1610	u8 stat0, stat1;
1611
1612	if (!list_empty(&ep->queue))
1613		req = list_entry(ep->queue.next,
1614			struct net2272_request, queue);
1615	else
1616		req = NULL;
1617
1618	/* ack all, and handle what we care about */
1619	stat0 = net2272_ep_read(ep, EP_STAT0);
1620	stat1 = net2272_ep_read(ep, EP_STAT1);
1621	ep->irqs++;
1622
1623	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1624		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1625
1626	net2272_ep_write(ep, EP_STAT0, stat0 &
1627		~((1 << NAK_OUT_PACKETS)
1628		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1629	net2272_ep_write(ep, EP_STAT1, stat1);
1630
1631	/* data packet(s) received (in the fifo, OUT)
1632	 * direction must be validated, otherwise control read status phase
1633	 * could be interpreted as a valid packet
1634	 */
1635	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1636		net2272_pio_advance(ep);
1637	/* data packet(s) transmitted (IN) */
1638	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1639		net2272_pio_advance(ep);
1640}
1641
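/*
 * Endpoint lookup by the wIndex of a control request: bit 7 of the address
 * carries the direction (USB_DIR_IN) and bits 3:0 the endpoint number,
 * which is why the match below XORs the direction bit and compares only the
 * low nibble.
 */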
1642static struct net2272_ep *
1643net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1644{
1645	struct net2272_ep *ep;
1646
1647	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1648		return &dev->ep[0];
1649
1650	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1651		u8 bEndpointAddress;
1652
1653		if (!ep->desc)
1654			continue;
1655		bEndpointAddress = ep->desc->bEndpointAddress;
1656		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1657			continue;
1658		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1659			return ep;
1660	}
1661	return NULL;
1662}
1663
1664/*
1665 * USB Test Packet:
1666 * JKJKJKJK * 9
1667 * JJKKJJKK * 8
1668 * JJJJKKKK * 8
1669 * JJJJJJJKKKKKKK * 8
1670 * JJJJJJJK * 8
1671 * {JKKKKKKK * 10}, JK
1672 */
1673static const u8 net2272_test_packet[] = {
1674	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1675	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1676	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1677	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1678	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1679	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1680};
1681
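/*
 * net2272_test_packet above is the standard USB 2.0 high-speed test packet
 * payload (USB 2.0 specification, section 7.1.20).  The host selects a test
 * mode with SET_FEATURE(TEST_MODE); the selector arrives in the high byte
 * of wIndex (see the USB_REQ_SET_FEATURE handling further down).
 */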
1682static void
1683net2272_set_test_mode(struct net2272 *dev, int mode)
1684{
1685	int i;
1686
1687	/* Disable all net2272 interrupts:
1688	 * Nothing but a power cycle should stop the test.
1689	 */
1690	net2272_write(dev, IRQENB0, 0x00);
1691	net2272_write(dev, IRQENB1, 0x00);
1692
1693	/* Force transceiver to high-speed */
1694	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1695
1696	net2272_write(dev, PAGESEL, 0);
1697	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1698	net2272_write(dev, EP_RSPCLR,
1699			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1700			| (1 << HIDE_STATUS_PHASE));
1701	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1702	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1703
1704	/* wait for status phase to complete */
1705	while (!(net2272_read(dev, EP_STAT0) &
1706				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1707		;
1708
1709	/* Enable test mode */
1710	net2272_write(dev, USBTEST, mode);
1711
1712	/* load test packet */
1713	if (mode == TEST_PACKET) {
1714		/* switch to 8 bit mode */
1715		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1716				~(1 << DATA_WIDTH));
1717
1718		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1719			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1720
1721		/* Validate test packet */
1722		net2272_write(dev, EP_TRANSFER0, 0);
1723	}
1724}
1725
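/*
 * IRQSTAT0 dispatch: a SETUP packet is handled first (standard requests are
 * decoded inline, everything else is delegated to the gadget driver's
 * setup() callback), then a DMA completion, and finally the per-endpoint
 * bits in the low nibble are walked to service each endpoint's FIFO/queue.
 */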
1726static void
1727net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1728{
1729	struct net2272_ep *ep;
1730	u8 num, scratch;
1731
1732	/* starting a control request? */
1733	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1734		union {
1735			u8 raw[8];
1736			struct usb_ctrlrequest	r;
1737		} u;
1738		int tmp = 0;
1739		struct net2272_request *req;
1740
1741		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1742			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1743				dev->gadget.speed = USB_SPEED_HIGH;
1744			else
1745				dev->gadget.speed = USB_SPEED_FULL;
1746			dev_dbg(dev->dev, "%s\n",
1747				usb_speed_string(dev->gadget.speed));
1748		}
1749
1750		ep = &dev->ep[0];
1751		ep->irqs++;
1752
1753		/* make sure any leftover interrupt state is cleared */
1754		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1755		while (!list_empty(&ep->queue)) {
1756			req = list_entry(ep->queue.next,
1757				struct net2272_request, queue);
1758			net2272_done(ep, req,
1759				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1760		}
1761		ep->stopped = 0;
1762		dev->protocol_stall = 0;
1763		net2272_ep_write(ep, EP_STAT0,
1764			    (1 << DATA_IN_TOKEN_INTERRUPT)
1765			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1766			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1767			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1768			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1769		net2272_ep_write(ep, EP_STAT1,
1770			    (1 << TIMEOUT)
1771			  | (1 << USB_OUT_ACK_SENT)
1772			  | (1 << USB_OUT_NAK_SENT)
1773			  | (1 << USB_IN_ACK_RCVD)
1774			  | (1 << USB_IN_NAK_SENT)
1775			  | (1 << USB_STALL_SENT)
1776			  | (1 << LOCAL_OUT_ZLP));
1777
1778		/*
1779		 * Ensure Control Read pre-validation setting is beyond maximum size
1780		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1781		 *    an EP0 transfer following the Control Write is a Control Read,
1782		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1783		 *    pre-validation count.
1784		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1785	 *    the pre-validation count cannot cause an unexpected validation
1786		 */
1787		net2272_write(dev, PAGESEL, 0);
1788		net2272_write(dev, EP_TRANSFER2, 0xff);
1789		net2272_write(dev, EP_TRANSFER1, 0xff);
1790		net2272_write(dev, EP_TRANSFER0, 0xff);
1791
1792		u.raw[0] = net2272_read(dev, SETUP0);
1793		u.raw[1] = net2272_read(dev, SETUP1);
1794		u.raw[2] = net2272_read(dev, SETUP2);
1795		u.raw[3] = net2272_read(dev, SETUP3);
1796		u.raw[4] = net2272_read(dev, SETUP4);
1797		u.raw[5] = net2272_read(dev, SETUP5);
1798		u.raw[6] = net2272_read(dev, SETUP6);
1799		u.raw[7] = net2272_read(dev, SETUP7);
1800		/*
1801		 * If you have a big endian cpu make sure le16_to_cpus
1802		 * performs the proper byte swapping here...
1803		 */
1804		le16_to_cpus(&u.r.wValue);
1805		le16_to_cpus(&u.r.wIndex);
1806		le16_to_cpus(&u.r.wLength);
1807
1808		/* ack the irq */
1809		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1810		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1811
1812		/* watch control traffic at the token level, and force
1813		 * synchronization before letting the status phase happen.
1814		 */
1815		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1816		if (ep->is_in) {
1817			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1818				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1819				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1820			stop_out_naking(ep);
1821		} else
1822			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1823				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1824				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1825		net2272_ep_write(ep, EP_IRQENB, scratch);
1826
1827		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1828			goto delegate;
1829		switch (u.r.bRequest) {
1830		case USB_REQ_GET_STATUS: {
1831			struct net2272_ep *e;
1832			u16 status = 0;
1833
1834			switch (u.r.bRequestType & USB_RECIP_MASK) {
1835			case USB_RECIP_ENDPOINT:
1836				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1837				if (!e || u.r.wLength > 2)
1838					goto do_stall;
1839				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1840					status = cpu_to_le16(1);
1841				else
1842					status = cpu_to_le16(0);
1843
1844				/* don't bother with a request object! */
1845				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1846				writew(status, net2272_reg_addr(dev, EP_DATA));
1847				set_fifo_bytecount(&dev->ep[0], 0);
1848				allow_status(ep);
1849				dev_vdbg(dev->dev, "%s stat %02x\n",
1850					ep->ep.name, status);
1851				goto next_endpoints;
1852			case USB_RECIP_DEVICE:
1853				if (u.r.wLength > 2)
1854					goto do_stall;
1855				if (dev->gadget.is_selfpowered)
1856					status = (1 << USB_DEVICE_SELF_POWERED);
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "device stat %02x\n", status);
1864				goto next_endpoints;
1865			case USB_RECIP_INTERFACE:
1866				if (u.r.wLength > 2)
1867					goto do_stall;
1868
1869				/* don't bother with a request object! */
1870				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1871				writew(status, net2272_reg_addr(dev, EP_DATA));
1872				set_fifo_bytecount(&dev->ep[0], 0);
1873				allow_status(ep);
1874				dev_vdbg(dev->dev, "interface status %02x\n", status);
1875				goto next_endpoints;
1876			}
1877
1878			break;
1879		}
1880		case USB_REQ_CLEAR_FEATURE: {
1881			struct net2272_ep *e;
1882
1883			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1884				goto delegate;
1885			if (u.r.wValue != USB_ENDPOINT_HALT ||
1886			    u.r.wLength != 0)
1887				goto do_stall;
1888			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1889			if (!e)
1890				goto do_stall;
1891			if (e->wedged) {
1892				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1893					ep->ep.name);
1894			} else {
1895				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1896				clear_halt(e);
1897			}
1898			allow_status(ep);
1899			goto next_endpoints;
1900		}
1901		case USB_REQ_SET_FEATURE: {
1902			struct net2272_ep *e;
1903
1904			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1905				if (u.r.wIndex != NORMAL_OPERATION)
1906					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1907				allow_status(ep);
1908				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1909				goto next_endpoints;
1910			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1911				goto delegate;
1912			if (u.r.wValue != USB_ENDPOINT_HALT ||
1913			    u.r.wLength != 0)
1914				goto do_stall;
1915			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1916			if (!e)
1917				goto do_stall;
1918			set_halt(e);
1919			allow_status(ep);
1920			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1921			goto next_endpoints;
1922		}
1923		case USB_REQ_SET_ADDRESS: {
1924			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1925			allow_status(ep);
1926			break;
1927		}
1928		default:
1929 delegate:
1930			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1931				"ep_cfg %08x\n",
1932				u.r.bRequestType, u.r.bRequest,
1933				u.r.wValue, u.r.wIndex,
1934				net2272_ep_read(ep, EP_CFG));
1935			spin_unlock(&dev->lock);
1936			tmp = dev->driver->setup(&dev->gadget, &u.r);
1937			spin_lock(&dev->lock);
1938		}
1939
1940		/* stall ep0 on error */
1941		if (tmp < 0) {
1942 do_stall:
1943			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1944				u.r.bRequestType, u.r.bRequest, tmp);
1945			dev->protocol_stall = 1;
1946		}
1947	/* endpoint dma irq? */
1948	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1949		net2272_cancel_dma(dev);
1950		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1951		stat &= ~(1 << DMA_DONE_INTERRUPT);
1952		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1953			? 2 : 1;
1954
1955		ep = &dev->ep[num];
1956		net2272_handle_dma(ep);
1957	}
1958
1959 next_endpoints:
1960	/* endpoint data irq? */
1961	scratch = stat & 0x0f;
1962	stat &= ~0x0f;
1963	for (num = 0; scratch; num++) {
1964		u8 t;
1965
1966		/* does this endpoint's FIFO and queue need tending? */
1967		t = 1 << num;
1968		if ((scratch & t) == 0)
1969			continue;
1970		scratch ^= t;
1971
1972		ep = &dev->ep[num];
1973		net2272_handle_ep(ep);
1974	}
1975
1976	/* some interrupts we can just ignore */
1977	stat &= ~(1 << SOF_INTERRUPT);
1978
1979	if (stat)
1980		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1981}
1982
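/*
 * IRQSTAT1 dispatch: VBUS and root-port-reset changes become disconnect and
 * reset notifications for the gadget driver, and suspend/resume changes
 * become its suspend()/resume() callbacks.
 */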
1983static void
1984net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1985{
1986	u8 tmp, mask;
1987
1988	/* after disconnect there's nothing else to do! */
1989	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1990	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1991
1992	if (stat & tmp) {
1993		bool	reset = false;
1994		bool	disconnect = false;
1995
1996		/*
1997		 * Ignore disconnects and resets if the speed hasn't been set.
1998		 * VBUS can bounce and there's always an initial reset.
1999		 */
2000		net2272_write(dev, IRQSTAT1, tmp);
2001		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2002			if ((stat & (1 << VBUS_INTERRUPT)) &&
2003					(net2272_read(dev, USBCTL1) &
2004						(1 << VBUS_PIN)) == 0) {
2005				disconnect = true;
2006				dev_dbg(dev->dev, "disconnect %s\n",
2007					dev->driver->driver.name);
2008			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2009					(net2272_read(dev, USBCTL1) & mask)
2010						== 0) {
2011				reset = true;
2012				dev_dbg(dev->dev, "reset %s\n",
2013					dev->driver->driver.name);
2014			}
2015
2016			if (disconnect || reset) {
2017				stop_activity(dev, dev->driver);
2018				net2272_ep0_start(dev);
2019				spin_unlock(&dev->lock);
2020				if (reset)
2021					usb_gadget_udc_reset
2022						(&dev->gadget, dev->driver);
2023				else
2024					(dev->driver->disconnect)
2025						(&dev->gadget);
2026				spin_lock(&dev->lock);
2027				return;
2028			}
2029		}
2030		stat &= ~tmp;
2031
2032		if (!stat)
2033			return;
2034	}
2035
2036	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2037	if (stat & tmp) {
2038		net2272_write(dev, IRQSTAT1, tmp);
2039		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2040			if (dev->driver->suspend)
2041				dev->driver->suspend(&dev->gadget);
2042			if (!enable_suspend) {
2043				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2044				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2045			}
2046		} else {
2047			if (dev->driver->resume)
2048				dev->driver->resume(&dev->gadget);
2049		}
2050		stat &= ~tmp;
2051	}
2052
2053	/* clear any other status/irqs */
2054	if (stat)
2055		net2272_write(dev, IRQSTAT1, stat);
2056
2057	/* some status we can just ignore */
2058	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2059			| (1 << SUSPEND_REQUEST_INTERRUPT)
2060			| (1 << RESUME_INTERRUPT));
2061	if (!stat)
2062		return;
2063	else
2064		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2065}
2066
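/*
 * Top-level interrupt handler.  On the PLX RDK1/RDK2 development cards the
 * NET2272 interrupt is routed through a PLX 9054 or FPGA bridge, so the
 * bridge's interrupt status is checked first; a plain platform-bus
 * attachment reads IRQSTAT1/IRQSTAT0 directly.
 */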
2067static irqreturn_t net2272_irq(int irq, void *_dev)
2068{
2069	struct net2272 *dev = _dev;
2070#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2071	u32 intcsr;
2072#endif
2073#if defined(PLX_PCI_RDK)
2074	u8 dmareq;
2075#endif
2076	spin_lock(&dev->lock);
2077#if defined(PLX_PCI_RDK)
2078	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2079
2080	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2081		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2082				dev->rdk1.plx9054_base_addr + INTCSR);
2083		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2084		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2085		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2086		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2087			dev->rdk1.plx9054_base_addr + INTCSR);
2088	}
2089	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2090		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2091				dev->rdk1.plx9054_base_addr + DMACSR0);
2092
2093		dmareq = net2272_read(dev, DMAREQ);
2094		if (dmareq & 0x01)
2095			net2272_handle_dma(&dev->ep[2]);
2096		else
2097			net2272_handle_dma(&dev->ep[1]);
2098	}
2099#endif
2100#if defined(PLX_PCI_RDK2)
2101	/* see if PCI int for us by checking irqstat */
2102	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2103	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2104		spin_unlock(&dev->lock);
2105		return IRQ_NONE;
2106	}
2107	/* check dma interrupts */
2108#endif
2109	/* Platform/device interrupt handler */
2110#if !defined(PLX_PCI_RDK)
2111	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2112	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2113#endif
2114	spin_unlock(&dev->lock);
2115
2116	return IRQ_HANDLED;
2117}
2118
2119static int net2272_present(struct net2272 *dev)
2120{
2121	/*
2122	 * Quick test to see if CPU can communicate properly with the NET2272.
2123	 * Verifies connection using writes and reads to write/read and
2124	 * read-only registers.
2125	 *
2126	 * This routine is strongly recommended, especially during early bring-up
2127	 * of new hardware; however, for designs that do not apply Power On System
2128	 * Tests (POST) it may be discarded (or perhaps minimized).
2129	 */
2130	unsigned int ii;
2131	u8 val, refval;
2132
2133	/* Verify NET2272 write/read SCRATCH register can write and read */
2134	refval = net2272_read(dev, SCRATCH);
2135	for (ii = 0; ii < 0x100; ii += 7) {
2136		net2272_write(dev, SCRATCH, ii);
2137		val = net2272_read(dev, SCRATCH);
2138		if (val != ii) {
2139			dev_dbg(dev->dev,
2140				"%s: write/read SCRATCH register test failed: "
2141				"wrote:0x%2.2x, read:0x%2.2x\n",
2142				__func__, ii, val);
2143			return -EINVAL;
2144		}
2145	}
2146	/* To be nice, we write the original SCRATCH value back: */
2147	net2272_write(dev, SCRATCH, refval);
2148
2149	/* Verify NET2272 CHIPREV register is read-only: */
2150	refval = net2272_read(dev, CHIPREV_2272);
2151	for (ii = 0; ii < 0x100; ii += 7) {
2152		net2272_write(dev, CHIPREV_2272, ii);
2153		val = net2272_read(dev, CHIPREV_2272);
2154		if (val != refval) {
2155			dev_dbg(dev->dev,
2156				"%s: write/read CHIPREV register test failed: "
2157				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2158				__func__, ii, val, refval);
2159			return -EINVAL;
2160		}
2161	}
2162
2163	/*
2164	 * Verify NET2272's "NET2270 legacy revision" register
2165	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2166	 *    register should read the same value, regardless of the NET2272
2167	 *    silicon revision.  The legacy register applies to NET2270
2168	 *    firmware being applied to the NET2272.
2169	 */
2170	val = net2272_read(dev, CHIPREV_LEGACY);
2171	if (val != NET2270_LEGACY_REV) {
2172		/*
2173		 * Unexpected legacy revision value
2174		 * - Perhaps the chip is a NET2270?
2175		 */
2176		dev_dbg(dev->dev,
2177			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2178			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2179			__func__, NET2270_LEGACY_REV, val);
2180		return -EINVAL;
2181	}
2182
2183	/*
2184	 * Verify NET2272 silicon revision
2185	 *  - This revision register is appropriate for the silicon version
2186	 *    of the NET2272
2187	 */
2188	val = net2272_read(dev, CHIPREV_2272);
2189	switch (val) {
2190	case CHIPREV_NET2272_R1:
2191		/*
2192		 * NET2272 Rev 1 has DMA related errata:
2193		 *  - Newer silicon (Rev 1A or better) required
2194		 */
2195		dev_dbg(dev->dev,
2196			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2197			__func__);
2198		break;
2199	case CHIPREV_NET2272_R1A:
2200		break;
2201	default:
2202		/* NET2272 silicon version *may* not work with this firmware */
2203		dev_dbg(dev->dev,
2204			"%s: unexpected silicon revision register value: "
2205			" CHIPREV_2272: 0x%2.2x\n",
2206			__func__, val);
2207		/*
2208		 * Return Success, even though the chip rev is not an expected value
2209		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2210		 *  - Often, new silicon is perfectly compatible
2211		 */
2212	}
2213
2214	/* Success: NET2272 checks out OK */
2215	return 0;
2216}
2217
2218static void
2219net2272_gadget_release(struct device *_dev)
2220{
2221	struct net2272 *dev = dev_get_drvdata(_dev);
2222	kfree(dev);
2223}
2224
2225/*---------------------------------------------------------------------------*/
2226
2227static void
2228net2272_remove(struct net2272 *dev)
2229{
2230	usb_del_gadget_udc(&dev->gadget);
2231	free_irq(dev->irq, dev);
2232	iounmap(dev->base_addr);
2233	device_remove_file(dev->dev, &dev_attr_registers);
2234
2235	dev_info(dev->dev, "unbind\n");
2236}
2237
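/*
 * Probe is split in three: net2272_probe_init() allocates the device and
 * fills in the bus-independent fields, the bus-specific probe (PCI RDK or
 * platform) maps the register window, and net2272_probe_fin() verifies the
 * chip is present, resets it, requests the IRQ and registers the UDC.
 */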
2238static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2239{
2240	struct net2272 *ret;
2241
2242	if (!irq) {
2243		dev_dbg(dev, "No IRQ!\n");
2244		return ERR_PTR(-ENODEV);
2245	}
2246
2247	/* alloc, and start init */
2248	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2249	if (!ret)
2250		return ERR_PTR(-ENOMEM);
2251
2252	spin_lock_init(&ret->lock);
2253	ret->irq = irq;
2254	ret->dev = dev;
2255	ret->gadget.ops = &net2272_ops;
2256	ret->gadget.max_speed = USB_SPEED_HIGH;
2257
2258	/* the "gadget" abstracts/virtualizes the controller */
2259	ret->gadget.name = driver_name;
2260
2261	return ret;
2262}
2263
2264static int
2265net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2266{
2267	int ret;
2268
2269	/* See if there... */
2270	if (net2272_present(dev)) {
2271		dev_warn(dev->dev, "2272 not found!\n");
2272		ret = -ENODEV;
2273		goto err;
2274	}
2275
2276	net2272_usb_reset(dev);
2277	net2272_usb_reinit(dev);
2278
2279	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2280	if (ret) {
2281		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2282		goto err;
2283	}
2284
2285	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2286
2287	/* done */
2288	dev_info(dev->dev, "%s\n", driver_desc);
2289	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2290		dev->irq, dev->base_addr, dev->chiprev,
2291		dma_mode_string());
2292	dev_info(dev->dev, "version: %s\n", driver_vers);
2293
2294	ret = device_create_file(dev->dev, &dev_attr_registers);
2295	if (ret)
2296		goto err_irq;
2297
2298	ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2299			net2272_gadget_release);
2300	if (ret)
2301		goto err_add_udc;
2302
2303	return 0;
2304
2305err_add_udc:
2306	device_remove_file(dev->dev, &dev_attr_registers);
2307 err_irq:
2308	free_irq(dev->irq, dev);
2309 err:
2310	return ret;
2311}
2312
2313#ifdef CONFIG_PCI
2314
2315/*
2316 * wrap this driver around the specified device, but
2317 * don't respond over USB until a gadget driver binds to us
2318 */
2319
2320static int
2321net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2322{
2323	unsigned long resource, len, tmp;
2324	void __iomem *mem_mapped_addr[4];
2325	int ret, i;
2326
2327	/*
2328	 * BAR 0 holds PLX 9054 config registers
2329	 * BAR 1 is i/o memory; unused here
2330	 * BAR 2 holds EPLD config registers
2331	 * BAR 3 holds NET2272 registers
2332	 */
2333
2334	/* Find and map all address spaces */
2335	for (i = 0; i < 4; ++i) {
2336		if (i == 1)
2337			continue;	/* BAR1 unused */
2338
2339		resource = pci_resource_start(pdev, i);
2340		len = pci_resource_len(pdev, i);
2341
2342		if (!request_mem_region(resource, len, driver_name)) {
2343			dev_dbg(dev->dev, "controller already in use\n");
2344			ret = -EBUSY;
2345			goto err;
2346		}
2347
2348		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2349		if (mem_mapped_addr[i] == NULL) {
2350			release_mem_region(resource, len);
2351			dev_dbg(dev->dev, "can't map memory\n");
2352			ret = -EFAULT;
2353			goto err;
2354		}
2355	}
2356
2357	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2358	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2359	dev->base_addr = mem_mapped_addr[3];
2360
2361	/* Set PLX 9054 bus width (16 bits) */
2362	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2363	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2364			dev->rdk1.plx9054_base_addr + LBRD1);
2365
2366	/* Enable PLX 9054 Interrupts */
2367	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2368			(1 << PCI_INTERRUPT_ENABLE) |
2369			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2370			dev->rdk1.plx9054_base_addr + INTCSR);
2371
2372	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2373			dev->rdk1.plx9054_base_addr + DMACSR0);
2374
2375	/* reset */
2376	writeb((1 << EPLD_DMA_ENABLE) |
2377		(1 << DMA_CTL_DACK) |
2378		(1 << DMA_TIMEOUT_ENABLE) |
2379		(1 << USER) |
2380		(0 << MPX_MODE) |
2381		(1 << BUSWIDTH) |
2382		(1 << NET2272_RESET),
2383		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2384
2385	mb();
2386	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2387		~(1 << NET2272_RESET),
2388		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2389	udelay(200);
2390
2391	return 0;
2392
2393 err:
2394	while (--i >= 0) {
		if (i == 1)
			continue;	/* BAR1 unused */
2395		iounmap(mem_mapped_addr[i]);
2396		release_mem_region(pci_resource_start(pdev, i),
2397			pci_resource_len(pdev, i));
2398	}
2399
2400	return ret;
2401}
2402
2403static int
2404net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2405{
2406	unsigned long resource, len;
2407	void __iomem *mem_mapped_addr[2];
2408	int ret, i;
2409
2410	/*
2411	 * BAR 0 holds FPGA config registers
2412	 * BAR 1 holds NET2272 registers
2413	 */
2414
2415	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2416	for (i = 0; i < 2; ++i) {
2417		resource = pci_resource_start(pdev, i);
2418		len = pci_resource_len(pdev, i);
2419
2420		if (!request_mem_region(resource, len, driver_name)) {
2421			dev_dbg(dev->dev, "controller already in use\n");
2422			ret = -EBUSY;
2423			goto err;
2424		}
2425
2426		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2427		if (mem_mapped_addr[i] == NULL) {
2428			release_mem_region(resource, len);
2429			dev_dbg(dev->dev, "can't map memory\n");
2430			ret = -EFAULT;
2431			goto err;
2432		}
2433	}
2434
2435	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2436	dev->base_addr = mem_mapped_addr[1];
2437
2438	mb();
2439	/* Set 2272 bus width (16 bits) and reset */
2440	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2441	udelay(200);
2442	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2443	/* Print fpga version number */
2444	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2445		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2446	/* Enable FPGA Interrupts */
2447	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2448
2449	return 0;
2450
2451 err:
2452	while (--i >= 0) {
2453		iounmap(mem_mapped_addr[i]);
2454		release_mem_region(pci_resource_start(pdev, i),
2455			pci_resource_len(pdev, i));
2456	}
2457
2458	return ret;
2459}
2460
2461static int
2462net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2463{
2464	struct net2272 *dev;
2465	int ret;
2466
2467	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2468	if (IS_ERR(dev))
2469		return PTR_ERR(dev);
2470	dev->dev_id = pdev->device;
2471
2472	if (pci_enable_device(pdev) < 0) {
2473		ret = -ENODEV;
2474		goto err_free;
2475	}
2476
2477	pci_set_master(pdev);
2478
2479	switch (pdev->device) {
2480	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2481	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2482	default: BUG();
2483	}
2484	if (ret)
2485		goto err_pci;
2486
2487	ret = net2272_probe_fin(dev, 0);
2488	if (ret)
2489		goto err_pci;
2490
2491	pci_set_drvdata(pdev, dev);
2492
2493	return 0;
2494
2495 err_pci:
2496	pci_disable_device(pdev);
2497 err_free:
2498	kfree(dev);
2499
2500	return ret;
2501}
2502
2503static void
2504net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2505{
2506	int i;
2507
2508	/* disable PLX 9054 interrupts */
2509	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2510		~(1 << PCI_INTERRUPT_ENABLE),
2511		dev->rdk1.plx9054_base_addr + INTCSR);
2512
2513	/* clean up resources allocated during probe() */
2514	iounmap(dev->rdk1.plx9054_base_addr);
2515	iounmap(dev->rdk1.epld_base_addr);
2516
2517	for (i = 0; i < 4; ++i) {
2518		if (i == 1)
2519			continue;	/* BAR1 unused */
2520		release_mem_region(pci_resource_start(pdev, i),
2521			pci_resource_len(pdev, i));
2522	}
2523}
2524
2525static void
2526net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2527{
2528	int i;
2529
2530	/* disable fpga interrupts
2531	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2532			~(1 << PCI_INTERRUPT_ENABLE),
2533			dev->rdk1.plx9054_base_addr + INTCSR);
2534	*/
2535
2536	/* clean up resources allocated during probe() */
2537	iounmap(dev->rdk2.fpga_base_addr);
2538
2539	for (i = 0; i < 2; ++i)
2540		release_mem_region(pci_resource_start(pdev, i),
2541			pci_resource_len(pdev, i));
2542}
2543
2544static void
2545net2272_pci_remove(struct pci_dev *pdev)
2546{
2547	struct net2272 *dev = pci_get_drvdata(pdev);
2548
2549	net2272_remove(dev);
2550
2551	switch (pdev->device) {
2552	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2553	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2554	default: BUG();
2555	}
2556
2557	pci_disable_device(pdev);
2558
2559	kfree(dev);
2560}
2561
2562/* Table of matching PCI IDs */
2563static struct pci_device_id pci_ids[] = {
2564	{	/* RDK 1 card */
2565		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2566		.class_mask  = 0,
2567		.vendor      = PCI_VENDOR_ID_PLX,
2568		.device      = PCI_DEVICE_ID_RDK1,
2569		.subvendor   = PCI_ANY_ID,
2570		.subdevice   = PCI_ANY_ID,
2571	},
2572	{	/* RDK 2 card */
2573		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2574		.class_mask  = 0,
2575		.vendor      = PCI_VENDOR_ID_PLX,
2576		.device      = PCI_DEVICE_ID_RDK2,
2577		.subvendor   = PCI_ANY_ID,
2578		.subdevice   = PCI_ANY_ID,
2579	},
2580	{ }
2581};
2582MODULE_DEVICE_TABLE(pci, pci_ids);
2583
2584static struct pci_driver net2272_pci_driver = {
2585	.name     = driver_name,
2586	.id_table = pci_ids,
2587
2588	.probe    = net2272_pci_probe,
2589	.remove   = net2272_pci_remove,
2590};
2591
2592static int net2272_pci_register(void)
2593{
2594	return pci_register_driver(&net2272_pci_driver);
2595}
2596
2597static void net2272_pci_unregister(void)
2598{
2599	pci_unregister_driver(&net2272_pci_driver);
2600}
2601
2602#else
2603static inline int net2272_pci_register(void) { return 0; }
2604static inline void net2272_pci_unregister(void) { }
2605#endif
2606
2607/*---------------------------------------------------------------------------*/
2608
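/*
 * Platform attachment expects an IORESOURCE_MEM window and an IORESOURCE_IRQ,
 * plus an optional IORESOURCE_BUS entry used as the register address shift.
 * A board file might describe the part roughly as follows (base address and
 * IRQ number are made up for illustration):
 *
 *	static struct resource board_net2272_resources[] = {
 *		DEFINE_RES_MEM(0x20300000, 0x100),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device board_net2272 = {
 *		.name		= "net2272",
 *		.id		= -1,
 *		.resource	= board_net2272_resources,
 *		.num_resources	= ARRAY_SIZE(board_net2272_resources),
 *	};
 */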
2609static int
2610net2272_plat_probe(struct platform_device *pdev)
2611{
2612	struct net2272 *dev;
2613	int ret;
2614	unsigned int irqflags;
2615	resource_size_t base, len;
2616	struct resource *iomem, *iomem_bus, *irq_res;
2617
2618	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2619	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2620	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2621	if (!irq_res || !iomem) {
2622		dev_err(&pdev->dev, "must provide irq/base addr\n");
2623		return -EINVAL;
2624	}
2625
2626	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2627	if (IS_ERR(dev))
2628		return PTR_ERR(dev);
2629
2630	irqflags = 0;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2632		irqflags |= IRQF_TRIGGER_RISING;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2634		irqflags |= IRQF_TRIGGER_FALLING;
2635	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2636		irqflags |= IRQF_TRIGGER_HIGH;
2637	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2638		irqflags |= IRQF_TRIGGER_LOW;
2639
2640	base = iomem->start;
2641	len = resource_size(iomem);
2642	if (iomem_bus)
2643		dev->base_shift = iomem_bus->start;
2644
2645	if (!request_mem_region(base, len, driver_name)) {
2646		dev_dbg(dev->dev, "can't request memory region\n");
2647		ret = -EBUSY;
2648		goto err;
2649	}
2650	dev->base_addr = ioremap_nocache(base, len);
2651	if (!dev->base_addr) {
2652		dev_dbg(dev->dev, "can't map memory\n");
2653		ret = -EFAULT;
2654		goto err_req;
2655	}
2656
2657	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2658	if (ret)
2659		goto err_io;
2660
2661	platform_set_drvdata(pdev, dev);
2662	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2663		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2664
2665	return 0;
2666
2667 err_io:
2668	iounmap(dev->base_addr);
2669 err_req:
2670	release_mem_region(base, len);
2671 err:
2672	return ret;
2673}
2674
2675static int
2676net2272_plat_remove(struct platform_device *pdev)
2677{
2678	struct net2272 *dev = platform_get_drvdata(pdev);
2679
2680	net2272_remove(dev);
2681
2682	release_mem_region(pdev->resource[0].start,
2683		resource_size(&pdev->resource[0]));
2684
2685	kfree(dev);
2686
2687	return 0;
2688}
2689
2690static struct platform_driver net2272_plat_driver = {
2691	.probe   = net2272_plat_probe,
2692	.remove  = net2272_plat_remove,
2693	.driver  = {
2694		.name  = driver_name,
2695	},
2696	/* FIXME .suspend, .resume */
2697};
2698MODULE_ALIAS("platform:net2272");
2699
2700static int __init net2272_init(void)
2701{
2702	int ret;
2703
2704	ret = net2272_pci_register();
2705	if (ret)
2706		return ret;
2707	ret = platform_driver_register(&net2272_plat_driver);
2708	if (ret)
2709		goto err_pci;
2710	return ret;
2711
2712err_pci:
2713	net2272_pci_unregister();
2714	return ret;
2715}
2716module_init(net2272_init);
2717
2718static void __exit net2272_cleanup(void)
2719{
2720	net2272_pci_unregister();
2721	platform_driver_unregister(&net2272_plat_driver);
2722}
2723module_exit(net2272_cleanup);
2724
2725MODULE_DESCRIPTION(DRIVER_DESC);
2726MODULE_AUTHOR("PLX Technology, Inc.");
2727MODULE_LICENSE("GPL");