Note: File does not exist in v6.8.
   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	http://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26#define DRV_VERSION	"1.2"
  27#define DRV_RELDATE	"11-Sep-2006"
  28
  29
  30/* The user-configurable values.
  31   These may be modified when a driver module is loaded.*/
  32static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  34   Typical is a 64 element hash table based on the Ethernet CRC.  */
  35static const int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  38   Setting to > 1518 effectively disables this feature.
  39   This chip can receive into offset buffers, so the Alpha does not
  40   need a copy-align. */
  41static int rx_copybreak;
  42static int flowctrl=1;
  43
  44/* media[] specifies the media type the NIC operates at.
  45		 autosense	Autosensing active media.
  46		 10mbps_hd 	10Mbps half duplex.
  47		 10mbps_fd 	10Mbps full duplex.
  48		 100mbps_hd 	100Mbps half duplex.
  49		 100mbps_fd 	100Mbps full duplex.
  50		 0		Autosensing active media.
  51		 1	 	10Mbps half duplex.
  52		 2	 	10Mbps full duplex.
  53		 3	 	100Mbps half duplex.
  54		 4	 	100Mbps full duplex.
  55*/
  56#define MAX_UNITS 8
  57static char *media[MAX_UNITS];
  58
  59
  60/* Operational parameters that are set at compile time. */
  61
  62/* Keep the ring sizes a power of two for compile efficiency.
  63   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  64   Making the Tx ring too large decreases the effectiveness of channel
  65   bonding and packet priority, and more than 128 requires modifying the
  66   Tx error recovery.
  67   Large receive rings merely waste memory. */
  68#define TX_RING_SIZE	32
  69#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  70#define RX_RING_SIZE	64
  71#define RX_BUDGET	32
  72#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
  73#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
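/* Illustrative note (not used by the code): because the ring sizes above are
   powers of two, an unsigned index expression such as cur_tx % TX_RING_SIZE
   compiles down to cur_tx & (TX_RING_SIZE - 1); e.g. 33 % 32 == 33 & 31 == 1. */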
  74
  75/* Operational parameters that usually are not changed. */
  76/* Time in jiffies before concluding the transmitter is hung. */
  77#define TX_TIMEOUT  (4*HZ)
  78#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  79
  80/* Include files, designed to support most kernel versions 2.0.0 and later. */
  81#include <linux/module.h>
  82#include <linux/kernel.h>
  83#include <linux/string.h>
  84#include <linux/timer.h>
  85#include <linux/errno.h>
  86#include <linux/ioport.h>
  87#include <linux/interrupt.h>
  88#include <linux/pci.h>
  89#include <linux/netdevice.h>
  90#include <linux/etherdevice.h>
  91#include <linux/skbuff.h>
  92#include <linux/init.h>
  93#include <linux/bitops.h>
  94#include <asm/uaccess.h>
  95#include <asm/processor.h>		/* Processor type for cache alignment. */
  96#include <asm/io.h>
  97#include <linux/delay.h>
  98#include <linux/spinlock.h>
  99#include <linux/dma-mapping.h>
 100#include <linux/crc32.h>
 101#include <linux/ethtool.h>
 102#include <linux/mii.h>
 103
 104/* These identify the driver base version and may not be removed. */
 105static const char version[] __devinitconst =
 106	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
 107	" Written by Donald Becker\n";
 108
 109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 111MODULE_LICENSE("GPL");
 112
 113module_param(debug, int, 0);
 114module_param(rx_copybreak, int, 0);
 115module_param_array(media, charp, NULL, 0);
 116module_param(flowctrl, int, 0);
 117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
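/*
 * Illustrative usage only (not part of the driver): with the parameters
 * declared above, forcing the first NIC to 100 Mbps full duplex with flow
 * control could look like
 *
 *	modprobe sundance media=100mbps_fd flowctrl=1 debug=1
 *
 * media= takes one value per card index (either the names or the numeric
 * codes listed earlier), up to MAX_UNITS entries.
 */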
 120
 121/*
 122				Theory of Operation
 123
 124I. Board Compatibility
 125
 126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 127
 128II. Board-specific settings
 129
 130III. Driver operation
 131
 132IIIa. Ring buffers
 133
 134This driver uses two statically allocated fixed-size descriptor lists
 135formed into rings by a branch from the final descriptor to the beginning of
 136the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 137Some chips explicitly use only 2^N sized rings, while others use a
 138'next descriptor' pointer that the driver forms into rings.
 139
 140IIIb/c. Transmit/Receive Structure
 141
 142This driver uses a zero-copy receive and transmit scheme.
 143The driver allocates full frame size skbuffs for the Rx ring buffers at
 144open() time and passes the skb->data field to the chip as receive data
 145buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 146a fresh skbuff is allocated and the frame is copied to the new skbuff.
 147When the incoming frame is larger, the skbuff is passed directly up the
 148protocol stack.  Buffers consumed this way are replaced by newly allocated
 149skbuffs in a later phase of receives.
 150
 151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 152using a full-sized skbuff for small frames vs. the copying costs of larger
 153frames.  New boards are typically used in generously configured machines
 154and the underfilled buffers have negligible impact compared to the benefit of
 155a single allocation size, so the default value of zero results in never
 156copying packets.  When copying is done, the cost is usually mitigated by using
 157a combined copy/checksum routine.  Copying also preloads the cache, which is
 158most useful with small frames.
 159
 160A subtle aspect of the operation is that the IP header at offset 14 in an
 161Ethernet frame isn't longword aligned for further processing.
 162Unaligned buffers are permitted by the Sundance hardware, so
 163frames are received into the skbuff at an offset of "+2", 16-byte aligning
 164the IP header.
 165
 166IIId. Synchronization
 167
 168The driver runs as two independent, single-threaded flows of control.  One
 169is the send-packet routine, which enforces single-threaded use by the
 170dev->tbusy flag.  The other thread is the interrupt handler, which is single
 171threaded by the hardware and interrupt handling software.
 172
 173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 174flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 176the 'lp->tx_full' flag.
 177
 178The interrupt handler has exclusive control over the Rx ring and records stats
 179from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 181clears both the tx_full and tbusy flags.
 182
 183IV. Notes
 184
 185IVb. References
 186
 187The Sundance ST201 datasheet, preliminary version.
 188The Kendin KS8723 datasheet, preliminary version.
 189The ICplus IP100 datasheet, preliminary version.
 190http://www.scyld.com/expert/100mbps.html
 191http://www.scyld.com/expert/NWay.html
 192
 193IVc. Errata
 194
 195*/
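/* Illustrative sketch only (the real code lives in init_ring()/rx_poll()
 * below): the "+2" receive offset described in IIIb/c.
 *
 *	skb = dev_alloc_skb(np->rx_buf_sz + 2);
 *	skb_reserve(skb, 2);	// frame data now starts at offset 2
 *	// 2 + 14-byte Ethernet header = 16, so the IP header that follows
 *	// lands on a 16-byte (and thus longword) boundary
 */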
 196
 197/* Work-around for Kendin chip bugs. */
 198#ifndef CONFIG_SUNDANCE_MMIO
 199#define USE_IO_OPS 1
 200#endif
 201
 202static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
 203	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 204	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 205	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 206	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 207	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 208	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 209	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 210	{ }
 211};
 212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 213
 214enum {
 215	netdev_io_size = 128
 216};
 217
 218struct pci_id_info {
 219        const char *name;
 220};
 221static const struct pci_id_info pci_id_tbl[] __devinitdata = {
 222	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 223	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 224	{"D-Link DFE-580TX 4 port Server Adapter"},
 225	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 226	{"D-Link DL10050-based FAST Ethernet Adapter"},
 227	{"Sundance Technology Alta"},
 228	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229	{ }	/* terminate list. */
 230};
 231
 232/* This driver was written to use PCI memory space; however, x86-oriented
 233   hardware often uses I/O space accesses. */
 234
 235/* Offsets to the device registers.
 236   Unlike software-only systems, device drivers interact with complex hardware.
 237   It's not useful to define symbolic names for every register bit in the
 238   device.  The name can only partially document the semantics and make
 239   the driver longer and more difficult to read.
 240   In general, only the important configuration values or bits changed
 241   multiple times should be defined symbolically.
 242*/
 243enum alta_offsets {
 244	DMACtrl = 0x00,
 245	TxListPtr = 0x04,
 246	TxDMABurstThresh = 0x08,
 247	TxDMAUrgentThresh = 0x09,
 248	TxDMAPollPeriod = 0x0a,
 249	RxDMAStatus = 0x0c,
 250	RxListPtr = 0x10,
 251	DebugCtrl0 = 0x1a,
 252	DebugCtrl1 = 0x1c,
 253	RxDMABurstThresh = 0x14,
 254	RxDMAUrgentThresh = 0x15,
 255	RxDMAPollPeriod = 0x16,
 256	LEDCtrl = 0x1a,
 257	ASICCtrl = 0x30,
 258	EEData = 0x34,
 259	EECtrl = 0x36,
 260	FlashAddr = 0x40,
 261	FlashData = 0x44,
 262	TxStatus = 0x46,
 263	TxFrameId = 0x47,
 264	DownCounter = 0x18,
 265	IntrClear = 0x4a,
 266	IntrEnable = 0x4c,
 267	IntrStatus = 0x4e,
 268	MACCtrl0 = 0x50,
 269	MACCtrl1 = 0x52,
 270	StationAddr = 0x54,
 271	MaxFrameSize = 0x5A,
 272	RxMode = 0x5c,
 273	MIICtrl = 0x5e,
 274	MulticastFilter0 = 0x60,
 275	MulticastFilter1 = 0x64,
 276	RxOctetsLow = 0x68,
 277	RxOctetsHigh = 0x6a,
 278	TxOctetsLow = 0x6c,
 279	TxOctetsHigh = 0x6e,
 280	TxFramesOK = 0x70,
 281	RxFramesOK = 0x72,
 282	StatsCarrierError = 0x74,
 283	StatsLateColl = 0x75,
 284	StatsMultiColl = 0x76,
 285	StatsOneColl = 0x77,
 286	StatsTxDefer = 0x78,
 287	RxMissed = 0x79,
 288	StatsTxXSDefer = 0x7a,
 289	StatsTxAbort = 0x7b,
 290	StatsBcastTx = 0x7c,
 291	StatsBcastRx = 0x7d,
 292	StatsMcastTx = 0x7e,
 293	StatsMcastRx = 0x7f,
 294	/* Aliased and bogus values! */
 295	RxStatus = 0x0c,
 296};
 297
 298#define ASIC_HI_WORD(x)	((x) + 2)
 299
 300enum ASICCtrl_HiWord_bit {
 301	GlobalReset = 0x0001,
 302	RxReset = 0x0002,
 303	TxReset = 0x0004,
 304	DMAReset = 0x0008,
 305	FIFOReset = 0x0010,
 306	NetworkReset = 0x0020,
 307	HostReset = 0x0040,
 308	ResetBusy = 0x0400,
 309};
 310
 311/* Bits in the interrupt status/mask registers. */
 312enum intr_status_bits {
 313	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 314	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 315	IntrDrvRqst=0x0040,
 316	StatsMax=0x0080, LinkChange=0x0100,
 317	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 318};
 319
 320/* Bits in the RxMode register. */
 321enum rx_mode_bits {
 322	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 323	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 324};
 325/* Bits in MACCtrl. */
 326enum mac_ctrl0_bits {
 327	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 328	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 329};
 330enum mac_ctrl1_bits {
 331	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 332	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 333	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 334};
 335
 336/* The Rx and Tx buffer descriptors. */
 337/* Note that using only 32 bit fields simplifies conversion to big-endian
 338   architectures. */
 339struct netdev_desc {
 340	__le32 next_desc;
 341	__le32 status;
 342	struct desc_frag { __le32 addr, length; } frag[1];
 343};
 344
 345/* Bits in netdev_desc.status */
 346enum desc_status_bits {
 347	DescOwn=0x8000,
 348	DescEndPacket=0x4000,
 349	DescEndRing=0x2000,
 350	LastFrag=0x80000000,
 351	DescIntrOnTx=0x8000,
 352	DescIntrOnDMADone=0x80000000,
 353	DisableAlign = 0x00000001,
 354};
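/* Illustrative example (the authoritative code is start_tx() below): a Tx
 * descriptor for a single-fragment frame in ring slot 'entry' is filled as
 *
 *	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
 *	txdesc->frag[0].addr = cpu_to_le32(mapping);	// dma_map_single() result
 *	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
 *
 * where 'mapping' is shorthand for the DMA address of skb->data.
 */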
 355
 356#define PRIV_ALIGN	15 	/* Required alignment mask */
 357/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 358   within the structure. */
 359#define MII_CNT		4
 360struct netdev_private {
 361	/* Descriptor rings first for alignment. */
 362	struct netdev_desc *rx_ring;
 363	struct netdev_desc *tx_ring;
 364	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 365	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 366        dma_addr_t tx_ring_dma;
 367        dma_addr_t rx_ring_dma;
 368	struct timer_list timer;		/* Media monitoring timer. */
 369	/* ethtool extra stats */
 370	struct {
 371		u64 tx_multiple_collisions;
 372		u64 tx_single_collisions;
 373		u64 tx_late_collisions;
 374		u64 tx_deferred;
 375		u64 tx_deferred_excessive;
 376		u64 tx_aborted;
 377		u64 tx_bcasts;
 378		u64 rx_bcasts;
 379		u64 tx_mcasts;
 380		u64 rx_mcasts;
 381	} xstats;
 382	/* Frequently used values: keep some adjacent for cache effect. */
 383	spinlock_t lock;
 384	int msg_enable;
 385	int chip_id;
 386	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 387	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 388	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 389	unsigned int cur_tx, dirty_tx;
 390	/* These values keep track of the transceiver/media in use. */
 391	unsigned int flowctrl:1;
 392	unsigned int default_port:4;		/* Last dev->if_port value. */
 393	unsigned int an_enable:1;
 394	unsigned int speed;
 395	struct tasklet_struct rx_tasklet;
 396	struct tasklet_struct tx_tasklet;
 397	int budget;
 398	int cur_task;
 399	/* Multicast and receive mode. */
 400	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 401	u16 mcast_filter[4];
 402	/* MII transceiver section. */
 403	struct mii_if_info mii_if;
 404	int mii_preamble_required;
 405	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 406	struct pci_dev *pci_dev;
 407	void __iomem *base;
 408	spinlock_t statlock;
 409};
 410
 411/* The station address location in the EEPROM. */
 412#define EEPROM_SA_OFFSET	0x10
 413#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 414			IntrDrvRqst | IntrTxDone | StatsMax | \
 415			LinkChange)
 416
 417static int  change_mtu(struct net_device *dev, int new_mtu);
 418static int  eeprom_read(void __iomem *ioaddr, int location);
 419static int  mdio_read(struct net_device *dev, int phy_id, int location);
 420static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 421static int  mdio_wait_link(struct net_device *dev, int wait);
 422static int  netdev_open(struct net_device *dev);
 423static void check_duplex(struct net_device *dev);
 424static void netdev_timer(unsigned long data);
 425static void tx_timeout(struct net_device *dev);
 426static void init_ring(struct net_device *dev);
 427static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 428static int reset_tx (struct net_device *dev);
 429static irqreturn_t intr_handler(int irq, void *dev_instance);
 430static void rx_poll(unsigned long data);
 431static void tx_poll(unsigned long data);
 432static void refill_rx (struct net_device *dev);
 433static void netdev_error(struct net_device *dev, int intr_status);
 435static void set_rx_mode(struct net_device *dev);
 436static int __set_mac_addr(struct net_device *dev);
 437static int sundance_set_mac_addr(struct net_device *dev, void *data);
 438static struct net_device_stats *get_stats(struct net_device *dev);
 439static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 440static int  netdev_close(struct net_device *dev);
 441static const struct ethtool_ops ethtool_ops;
 442
 443static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 444{
 445	struct netdev_private *np = netdev_priv(dev);
 446	void __iomem *ioaddr = np->base + ASICCtrl;
 447	int countdown;
 448
 449	/* ST201 documentation states ASICCtrl is a 32-bit register */
 450	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 451	/* ST201 documentation states reset can take up to 1 ms */
 452	countdown = 10 + 1;
 453	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 454		if (--countdown == 0) {
 455			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
 456			break;
 457		}
 458		udelay(100);
 459	}
 460}
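/* Note: callers pass the reset bits shifted into the high 16-bit half of
 * ASICCtrl, e.g. sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16)
 * as reset_tx() does below, or 0x00ff << 16 for the full reset done at probe time. */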
 461
 462static const struct net_device_ops netdev_ops = {
 463	.ndo_open		= netdev_open,
 464	.ndo_stop		= netdev_close,
 465	.ndo_start_xmit		= start_tx,
 466	.ndo_get_stats 		= get_stats,
 467	.ndo_set_multicast_list = set_rx_mode,
 468	.ndo_do_ioctl 		= netdev_ioctl,
 469	.ndo_tx_timeout		= tx_timeout,
 470	.ndo_change_mtu		= change_mtu,
 471	.ndo_set_mac_address 	= sundance_set_mac_addr,
 472	.ndo_validate_addr	= eth_validate_addr,
 473};
 474
 475static int __devinit sundance_probe1 (struct pci_dev *pdev,
 476				      const struct pci_device_id *ent)
 477{
 478	struct net_device *dev;
 479	struct netdev_private *np;
 480	static int card_idx;
 481	int chip_idx = ent->driver_data;
 482	int irq;
 483	int i;
 484	void __iomem *ioaddr;
 485	u16 mii_ctl;
 486	void *ring_space;
 487	dma_addr_t ring_dma;
 488#ifdef USE_IO_OPS
 489	int bar = 0;
 490#else
 491	int bar = 1;
 492#endif
 493	int phy, phy_end, phy_idx = 0;
 494
 495/* when built into the kernel, we only print version if device is found */
 496#ifndef MODULE
 497	static int printed_version;
 498	if (!printed_version++)
 499		printk(version);
 500#endif
 501
 502	if (pci_enable_device(pdev))
 503		return -EIO;
 504	pci_set_master(pdev);
 505
 506	irq = pdev->irq;
 507
 508	dev = alloc_etherdev(sizeof(*np));
 509	if (!dev)
 510		return -ENOMEM;
 511	SET_NETDEV_DEV(dev, &pdev->dev);
 512
 513	if (pci_request_regions(pdev, DRV_NAME))
 514		goto err_out_netdev;
 515
 516	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 517	if (!ioaddr)
 518		goto err_out_res;
 519
 520	for (i = 0; i < 3; i++)
 521		((__le16 *)dev->dev_addr)[i] =
 522			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 523	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 524
 525	dev->base_addr = (unsigned long)ioaddr;
 526	dev->irq = irq;
 527
 528	np = netdev_priv(dev);
 529	np->base = ioaddr;
 530	np->pci_dev = pdev;
 531	np->chip_id = chip_idx;
 532	np->msg_enable = (1 << debug) - 1;
 533	spin_lock_init(&np->lock);
 534	spin_lock_init(&np->statlock);
 535	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 536	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 537
 538	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 539			&ring_dma, GFP_KERNEL);
 540	if (!ring_space)
 541		goto err_out_cleardev;
 542	np->tx_ring = (struct netdev_desc *)ring_space;
 543	np->tx_ring_dma = ring_dma;
 544
 545	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 546			&ring_dma, GFP_KERNEL);
 547	if (!ring_space)
 548		goto err_out_unmap_tx;
 549	np->rx_ring = (struct netdev_desc *)ring_space;
 550	np->rx_ring_dma = ring_dma;
 551
 552	np->mii_if.dev = dev;
 553	np->mii_if.mdio_read = mdio_read;
 554	np->mii_if.mdio_write = mdio_write;
 555	np->mii_if.phy_id_mask = 0x1f;
 556	np->mii_if.reg_num_mask = 0x1f;
 557
 558	/* The chip-specific entries in the device structure. */
 559	dev->netdev_ops = &netdev_ops;
 560	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 561	dev->watchdog_timeo = TX_TIMEOUT;
 562
 563	pci_set_drvdata(pdev, dev);
 564
 565	i = register_netdev(dev);
 566	if (i)
 567		goto err_out_unmap_rx;
 568
 569	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 570	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 571	       dev->dev_addr, irq);
 572
 573	np->phys[0] = 1;		/* Default setting */
 574	np->mii_preamble_required++;
 575
 576	/*
 577	 * It seems some PHYs don't deal well with address 0 being accessed
 578	 * first.
 579	 */
 580	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 581		phy = 0;
 582		phy_end = 31;
 583	} else {
 584		phy = 1;
 585		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 586	}
 587	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 588		int phyx = phy & 0x1f;
 589		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 590		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 591			np->phys[phy_idx++] = phyx;
 592			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 593			if ((mii_status & 0x0040) == 0)
 594				np->mii_preamble_required++;
 595			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 596				   "0x%4.4x advertising %4.4x.\n",
 597				   dev->name, phyx, mii_status, np->mii_if.advertising);
 598		}
 599	}
 600	np->mii_preamble_required--;
 601
 602	if (phy_idx == 0) {
 603		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 604			   dev->name, ioread32(ioaddr + ASICCtrl));
 605		goto err_out_unregister;
 606	}
 607
 608	np->mii_if.phy_id = np->phys[0];
 609
 610	/* Parse override configuration */
 611	np->an_enable = 1;
 612	if (card_idx < MAX_UNITS) {
 613		if (media[card_idx] != NULL) {
 614			np->an_enable = 0;
 615			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 616			    strcmp (media[card_idx], "4") == 0) {
 617				np->speed = 100;
 618				np->mii_if.full_duplex = 1;
 619			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 620				   strcmp (media[card_idx], "3") == 0) {
 621				np->speed = 100;
 622				np->mii_if.full_duplex = 0;
 623			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 624				   strcmp (media[card_idx], "2") == 0) {
 625				np->speed = 10;
 626				np->mii_if.full_duplex = 1;
 627			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 628				   strcmp (media[card_idx], "1") == 0) {
 629				np->speed = 10;
 630				np->mii_if.full_duplex = 0;
 631			} else {
 632				np->an_enable = 1;
 633			}
 634		}
 635		if (flowctrl == 1)
 636			np->flowctrl = 1;
 637	}
 638
 639	/* Fibre PHY? */
 640	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 641		/* Default 100Mbps Full */
 642		if (np->an_enable) {
 643			np->speed = 100;
 644			np->mii_if.full_duplex = 1;
 645			np->an_enable = 0;
 646		}
 647	}
 648	/* Reset PHY */
 649	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 650	mdelay (300);
 651	/* If flow control enabled, we need to advertise it.*/
 652	if (np->flowctrl)
 653		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 654	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 655	/* Force media type */
 656	if (!np->an_enable) {
 657		mii_ctl = 0;
 658		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 659		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 660		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 661		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 662			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 663
 664	}
 665
 666	/* Perhaps move the reset here? */
 667	/* Reset the chip to erase previous misconfiguration. */
 668	if (netif_msg_hw(np))
 669		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 670	sundance_reset(dev, 0x00ff << 16);
 671	if (netif_msg_hw(np))
 672		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 673
 674	card_idx++;
 675	return 0;
 676
 677err_out_unregister:
 678	unregister_netdev(dev);
 679err_out_unmap_rx:
 680	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 681		np->rx_ring, np->rx_ring_dma);
 682err_out_unmap_tx:
 683	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 684		np->tx_ring, np->tx_ring_dma);
 685err_out_cleardev:
 686	pci_set_drvdata(pdev, NULL);
 687	pci_iounmap(pdev, ioaddr);
 688err_out_res:
 689	pci_release_regions(pdev);
 690err_out_netdev:
 691	free_netdev (dev);
 692	return -ENODEV;
 693}
 694
 695static int change_mtu(struct net_device *dev, int new_mtu)
 696{
 697	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
 698		return -EINVAL;
 699	if (netif_running(dev))
 700		return -EBUSY;
 701	dev->mtu = new_mtu;
 702	return 0;
 703}
 704
 705#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 706/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 707static int __devinit eeprom_read(void __iomem *ioaddr, int location)
 708{
 709	int boguscnt = 10000;		/* Typical 1900 ticks. */
 710	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 711	do {
 712		eeprom_delay(ioaddr + EECtrl);
 713		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 714			return ioread16(ioaddr + EEData);
 715		}
 716	} while (--boguscnt > 0);
 717	return 0;
 718}
 719
 720/*  MII transceiver control section.
 721	Read and write the MII registers using software-generated serial
 722	MDIO protocol.  See the MII specifications or DP83840A data sheet
 723	for details.
 724
 725	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
 726	met by back-to-back 33 MHz PCI cycles. */
 727#define mdio_delay() ioread8(mdio_addr)
 728
 729enum mii_reg_bits {
 730	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 731};
 732#define MDIO_EnbIn  (0)
 733#define MDIO_WRITE0 (MDIO_EnbOutput)
 734#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 735
 736/* Generate the preamble required for initial synchronization and
 737   a few older transceivers. */
 738static void mdio_sync(void __iomem *mdio_addr)
 739{
 740	int bits = 32;
 741
 742	/* Establish sync by sending at least 32 logic ones. */
 743	while (--bits >= 0) {
 744		iowrite8(MDIO_WRITE1, mdio_addr);
 745		mdio_delay();
 746		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 747		mdio_delay();
 748	}
 749}
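/* Descriptive note on the framing used by mdio_read()/mdio_write() below
 * (IEEE 802.3 clause 22 management frames): after the preamble, a read is
 * <01 start><10 read op><5-bit phy><5-bit reg><turnaround><16 data bits>.
 * The (0xf6 << 10) constant supplies two extra preamble ones plus the 01/10
 * start and opcode bits; writes use opcode 01 and a 10 turnaround, which is
 * what (0x5002 << 16) encodes. */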
 750
 751static int mdio_read(struct net_device *dev, int phy_id, int location)
 752{
 753	struct netdev_private *np = netdev_priv(dev);
 754	void __iomem *mdio_addr = np->base + MIICtrl;
 755	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 756	int i, retval = 0;
 757
 758	if (np->mii_preamble_required)
 759		mdio_sync(mdio_addr);
 760
 761	/* Shift the read command bits out. */
 762	for (i = 15; i >= 0; i--) {
 763		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 764
 765		iowrite8(dataval, mdio_addr);
 766		mdio_delay();
 767		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 768		mdio_delay();
 769	}
 770	/* Read the two transition, 16 data, and wire-idle bits. */
 771	for (i = 19; i > 0; i--) {
 772		iowrite8(MDIO_EnbIn, mdio_addr);
 773		mdio_delay();
 774		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 775		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 776		mdio_delay();
 777	}
 778	return (retval>>1) & 0xffff;
 779}
 780
 781static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 782{
 783	struct netdev_private *np = netdev_priv(dev);
 784	void __iomem *mdio_addr = np->base + MIICtrl;
 785	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 786	int i;
 787
 788	if (np->mii_preamble_required)
 789		mdio_sync(mdio_addr);
 790
 791	/* Shift the command bits out. */
 792	for (i = 31; i >= 0; i--) {
 793		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 794
 795		iowrite8(dataval, mdio_addr);
 796		mdio_delay();
 797		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 798		mdio_delay();
 799	}
 800	/* Clear out extra bits. */
 801	for (i = 2; i > 0; i--) {
 802		iowrite8(MDIO_EnbIn, mdio_addr);
 803		mdio_delay();
 804		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 805		mdio_delay();
 806	}
 807}
 808
 809static int mdio_wait_link(struct net_device *dev, int wait)
 810{
 811	int bmsr;
 812	int phy_id;
 813	struct netdev_private *np;
 814
 815	np = netdev_priv(dev);
 816	phy_id = np->phys[0];
 817
 818	do {
 819		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 820		if (bmsr & 0x0004)
 821			return 0;
 822		mdelay(1);
 823	} while (--wait > 0);
 824	return -1;
 825}
 826
 827static int netdev_open(struct net_device *dev)
 828{
 829	struct netdev_private *np = netdev_priv(dev);
 830	void __iomem *ioaddr = np->base;
 831	unsigned long flags;
 832	int i;
 833
 834	/* Do we need to reset the chip??? */
 835
 836	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
 837	if (i)
 838		return i;
 839
 840	if (netif_msg_ifup(np))
 841		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
 842			   dev->name, dev->irq);
 843	init_ring(dev);
 844
 845	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 846	/* The Tx list pointer is written as packets are queued. */
 847
 848	/* Initialize other registers. */
 849	__set_mac_addr(dev);
 850#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 851	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 852#else
 853	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 854#endif
 855	if (dev->mtu > 2047)
 856		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 857
 858	/* Configure the PCI bus bursts and FIFO thresholds. */
 859
 860	if (dev->if_port == 0)
 861		dev->if_port = np->default_port;
 862
 863	spin_lock_init(&np->mcastlock);
 864
 865	set_rx_mode(dev);
 866	iowrite16(0, ioaddr + IntrEnable);
 867	iowrite16(0, ioaddr + DownCounter);
 868	/* Set the chip to poll every N*320nsec. */
 869	iowrite8(100, ioaddr + RxDMAPollPeriod);
 870	iowrite8(127, ioaddr + TxDMAPollPeriod);
 871	/* Fix DFE-580TX packet drop issue */
 872	if (np->pci_dev->revision >= 0x14)
 873		iowrite8(0x01, ioaddr + DebugCtrl1);
 874	netif_start_queue(dev);
 875
 876	spin_lock_irqsave(&np->lock, flags);
 877	reset_tx(dev);
 878	spin_unlock_irqrestore(&np->lock, flags);
 879
 880	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 881
 882	if (netif_msg_ifup(np))
 883		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 884			   "MAC Control %x, %4.4x %4.4x.\n",
 885			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 886			   ioread32(ioaddr + MACCtrl0),
 887			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 888
 889	/* Set the timer to check for link beat. */
 890	init_timer(&np->timer);
 891	np->timer.expires = jiffies + 3*HZ;
 892	np->timer.data = (unsigned long)dev;
 893	np->timer.function = netdev_timer;				/* timer handler */
 894	add_timer(&np->timer);
 895
 896	/* Enable interrupts by setting the interrupt mask. */
 897	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 898
 899	return 0;
 900}
 901
 902static void check_duplex(struct net_device *dev)
 903{
 904	struct netdev_private *np = netdev_priv(dev);
 905	void __iomem *ioaddr = np->base;
 906	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 907	int negotiated = mii_lpa & np->mii_if.advertising;
 908	int duplex;
 909
 910	/* Force media */
 911	if (!np->an_enable || mii_lpa == 0xffff) {
 912		if (np->mii_if.full_duplex)
 913			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 914				ioaddr + MACCtrl0);
 915		return;
 916	}
 917
 918	/* Autonegotiation */
 919	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 920	if (np->mii_if.full_duplex != duplex) {
 921		np->mii_if.full_duplex = duplex;
 922		if (netif_msg_link(np))
 923			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 924				   "negotiated capability %4.4x.\n", dev->name,
 925				   duplex ? "full" : "half", np->phys[0], negotiated);
 926		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 927	}
 928}
 929
 930static void netdev_timer(unsigned long data)
 931{
 932	struct net_device *dev = (struct net_device *)data;
 933	struct netdev_private *np = netdev_priv(dev);
 934	void __iomem *ioaddr = np->base;
 935	int next_tick = 10*HZ;
 936
 937	if (netif_msg_timer(np)) {
 938		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 939			   "Tx %x Rx %x.\n",
 940			   dev->name, ioread16(ioaddr + IntrEnable),
 941			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 942	}
 943	check_duplex(dev);
 944	np->timer.expires = jiffies + next_tick;
 945	add_timer(&np->timer);
 946}
 947
 948static void tx_timeout(struct net_device *dev)
 949{
 950	struct netdev_private *np = netdev_priv(dev);
 951	void __iomem *ioaddr = np->base;
 952	unsigned long flag;
 953
 954	netif_stop_queue(dev);
 955	tasklet_disable(&np->tx_tasklet);
 956	iowrite16(0, ioaddr + IntrEnable);
 957	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 958		   "TxFrameId %2.2x,"
 959		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 960		   ioread8(ioaddr + TxFrameId));
 961
 962	{
 963		int i;
 964		for (i=0; i<TX_RING_SIZE; i++) {
 965			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 966				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 967				le32_to_cpu(np->tx_ring[i].next_desc),
 968				le32_to_cpu(np->tx_ring[i].status),
 969				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 970				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 971				le32_to_cpu(np->tx_ring[i].frag[0].length));
 972		}
 973		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 974			ioread32(np->base + TxListPtr),
 975			netif_queue_stopped(dev));
 976		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 977			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 978			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 979		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
 980		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
 981	}
 982	spin_lock_irqsave(&np->lock, flag);
 983
 984	/* Stop and restart the chip's Tx processes. */
 985	reset_tx(dev);
 986	spin_unlock_irqrestore(&np->lock, flag);
 987
 988	dev->if_port = 0;
 989
 990	dev->trans_start = jiffies; /* prevent tx timeout */
 991	dev->stats.tx_errors++;
 992	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
 993		netif_wake_queue(dev);
 994	}
 995	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 996	tasklet_enable(&np->tx_tasklet);
 997}
 998
 999
1000/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1001static void init_ring(struct net_device *dev)
1002{
1003	struct netdev_private *np = netdev_priv(dev);
1004	int i;
1005
1006	np->cur_rx = np->cur_tx = 0;
1007	np->dirty_rx = np->dirty_tx = 0;
1008	np->cur_task = 0;
1009
1010	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1011
1012	/* Initialize all Rx descriptors. */
1013	for (i = 0; i < RX_RING_SIZE; i++) {
1014		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1015			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1016		np->rx_ring[i].status = 0;
1017		np->rx_ring[i].frag[0].length = 0;
1018		np->rx_skbuff[i] = NULL;
1019	}
1020
1021	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1022	for (i = 0; i < RX_RING_SIZE; i++) {
1023		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
1024		np->rx_skbuff[i] = skb;
1025		if (skb == NULL)
1026			break;
1027		skb->dev = dev;		/* Mark as being used by this device. */
1028		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1029		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1030			dma_map_single(&np->pci_dev->dev, skb->data,
1031				np->rx_buf_sz, DMA_FROM_DEVICE));
1032		if (dma_mapping_error(&np->pci_dev->dev,
1033					np->rx_ring[i].frag[0].addr)) {
1034			dev_kfree_skb(skb);
1035			np->rx_skbuff[i] = NULL;
1036			break;
1037		}
1038		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1039	}
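	/* Note: with unsigned wrap-around, dirty_rx below lags cur_rx by the
	   number of slots still missing a buffer, so refill_rx() can top the
	   ring up later if any allocation above failed. */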
1040	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1041
1042	for (i = 0; i < TX_RING_SIZE; i++) {
1043		np->tx_skbuff[i] = NULL;
1044		np->tx_ring[i].status = 0;
1045	}
1046}
1047
1048static void tx_poll (unsigned long data)
1049{
1050	struct net_device *dev = (struct net_device *)data;
1051	struct netdev_private *np = netdev_priv(dev);
1052	unsigned head = np->cur_task % TX_RING_SIZE;
1053	struct netdev_desc *txdesc =
1054		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1055
1056	/* Chain the next pointer */
1057	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1058		int entry = np->cur_task % TX_RING_SIZE;
1059		txdesc = &np->tx_ring[entry];
1060		if (np->last_tx) {
1061			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1062				entry*sizeof(struct netdev_desc));
1063		}
1064		np->last_tx = txdesc;
1065	}
1066	/* Mark the latest descriptor in the Tx ring */
1067	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1068
1069	if (ioread32 (np->base + TxListPtr) == 0)
1070		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1071			np->base + TxListPtr);
1072}
1073
1074static netdev_tx_t
1075start_tx (struct sk_buff *skb, struct net_device *dev)
1076{
1077	struct netdev_private *np = netdev_priv(dev);
1078	struct netdev_desc *txdesc;
1079	unsigned entry;
1080
1081	/* Calculate the next Tx descriptor entry. */
1082	entry = np->cur_tx % TX_RING_SIZE;
1083	np->tx_skbuff[entry] = skb;
1084	txdesc = &np->tx_ring[entry];
1085
1086	txdesc->next_desc = 0;
1087	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1088	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1089				skb->data, skb->len, DMA_TO_DEVICE));
1090	if (dma_mapping_error(&np->pci_dev->dev,
1091				txdesc->frag[0].addr))
1092			goto drop_frame;
1093	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1094
1095	/* Increment cur_tx before tasklet_schedule() */
1096	np->cur_tx++;
1097	mb();
1098	/* Schedule a tx_poll() task */
1099	tasklet_schedule(&np->tx_tasklet);
1100
1101	/* On some architectures: explicitly flush cache lines here. */
1102	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1103	    !netif_queue_stopped(dev)) {
1104		/* do nothing */
1105	} else {
1106		netif_stop_queue (dev);
1107	}
1108	if (netif_msg_tx_queued(np)) {
1109		printk (KERN_DEBUG
1110			"%s: Transmit frame #%d queued in slot %d.\n",
1111			dev->name, np->cur_tx, entry);
1112	}
1113	return NETDEV_TX_OK;
1114
1115drop_frame:
1116	dev_kfree_skb(skb);
1117	np->tx_skbuff[entry] = NULL;
1118	dev->stats.tx_dropped++;
1119	return NETDEV_TX_OK;
1120}
1121
1122/* Reset the hardware Tx path and free all Tx buffers. */
1123static int
1124reset_tx (struct net_device *dev)
1125{
1126	struct netdev_private *np = netdev_priv(dev);
1127	void __iomem *ioaddr = np->base;
1128	struct sk_buff *skb;
1129	int i;
1130
1131	/* Reset Tx logic; TxListPtr will be cleared. */
1132	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1133	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1134
1135	/* Free all Tx skbuffs. */
1136	for (i = 0; i < TX_RING_SIZE; i++) {
1137		np->tx_ring[i].next_desc = 0;
1138
1139		skb = np->tx_skbuff[i];
1140		if (skb) {
1141			dma_unmap_single(&np->pci_dev->dev,
1142				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1143				skb->len, DMA_TO_DEVICE);
1144			dev_kfree_skb_any(skb);
1145			np->tx_skbuff[i] = NULL;
1146			dev->stats.tx_dropped++;
1147		}
1148	}
1149	np->cur_tx = np->dirty_tx = 0;
1150	np->cur_task = 0;
1151
1152	np->last_tx = NULL;
1153	iowrite8(127, ioaddr + TxDMAPollPeriod);
1154
1155	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1156	return 0;
1157}
1158
1159/* The interrupt handler cleans up after the Tx thread,
1160   and schedules the Rx work via a tasklet. */
1161static irqreturn_t intr_handler(int irq, void *dev_instance)
1162{
1163	struct net_device *dev = (struct net_device *)dev_instance;
1164	struct netdev_private *np = netdev_priv(dev);
1165	void __iomem *ioaddr = np->base;
1166	int hw_frame_id;
1167	int tx_cnt;
1168	int tx_status;
1169	int handled = 0;
1170	int i;
1171
1172
1173	do {
1174		int intr_status = ioread16(ioaddr + IntrStatus);
1175		iowrite16(intr_status, ioaddr + IntrStatus);
1176
1177		if (netif_msg_intr(np))
1178			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1179				   dev->name, intr_status);
1180
1181		if (!(intr_status & DEFAULT_INTR))
1182			break;
1183
1184		handled = 1;
1185
1186		if (intr_status & (IntrRxDMADone)) {
1187			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1188					ioaddr + IntrEnable);
1189			if (np->budget < 0)
1190				np->budget = RX_BUDGET;
1191			tasklet_schedule(&np->rx_tasklet);
1192		}
1193		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1194			tx_status = ioread16 (ioaddr + TxStatus);
1195			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1196				if (netif_msg_tx_done(np))
1197					printk
1198					    ("%s: Transmit status is %2.2x.\n",
1199				     	dev->name, tx_status);
1200				if (tx_status & 0x1e) {
1201					if (netif_msg_tx_err(np))
1202						printk("%s: Transmit error status %4.4x.\n",
1203							   dev->name, tx_status);
1204					dev->stats.tx_errors++;
1205					if (tx_status & 0x10)
1206						dev->stats.tx_fifo_errors++;
1207					if (tx_status & 0x08)
1208						dev->stats.collisions++;
1209					if (tx_status & 0x04)
1210						dev->stats.tx_fifo_errors++;
1211					if (tx_status & 0x02)
1212						dev->stats.tx_window_errors++;
1213
1214					/*
1215					** This reset has been verified on
1216					** DFE-580TX boards ! phdm@macqel.be.
1217					*/
1218					if (tx_status & 0x10) {	/* TxUnderrun */
1219						/* Restart Tx FIFO and transmitter */
1220						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1221						/* No need to reset the Tx pointer here */
1222					}
1223					/* Restart Tx and make sure it is enabled. */
1224					i = 10;
1225					do {
1226						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1227						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1228							break;
1229						mdelay(1);
1230					} while (--i);
1231				}
1232				/* Yup, this is a documentation bug.  It cost me *hours*. */
1233				iowrite16 (0, ioaddr + TxStatus);
1234				if (tx_cnt < 0) {
1235					iowrite32(5000, ioaddr + DownCounter);
1236					break;
1237				}
1238				tx_status = ioread16 (ioaddr + TxStatus);
1239			}
1240			hw_frame_id = (tx_status >> 8) & 0xff;
1241		} else 	{
1242			hw_frame_id = ioread8(ioaddr + TxFrameId);
1243		}
1244
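		/* Reclaim completed Tx entries.  On rev >= 0x14 parts (DFE-580TX)
		   the frame id stored in each descriptor's status (see start_tx)
		   is compared against the id last reported by the chip before
		   trusting the descriptor-complete bit. */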
1245		if (np->pci_dev->revision >= 0x14) {
1246			spin_lock(&np->lock);
1247			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1248				int entry = np->dirty_tx % TX_RING_SIZE;
1249				struct sk_buff *skb;
1250				int sw_frame_id;
1251				sw_frame_id = (le32_to_cpu(
1252					np->tx_ring[entry].status) >> 2) & 0xff;
1253				if (sw_frame_id == hw_frame_id &&
1254					!(le32_to_cpu(np->tx_ring[entry].status)
1255					& 0x00010000))
1256						break;
1257				if (sw_frame_id == (hw_frame_id + 1) %
1258					TX_RING_SIZE)
1259						break;
1260				skb = np->tx_skbuff[entry];
1261				/* Free the original skb. */
1262				dma_unmap_single(&np->pci_dev->dev,
1263					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1264					skb->len, DMA_TO_DEVICE);
1265				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1266				np->tx_skbuff[entry] = NULL;
1267				np->tx_ring[entry].frag[0].addr = 0;
1268				np->tx_ring[entry].frag[0].length = 0;
1269			}
1270			spin_unlock(&np->lock);
1271		} else {
1272			spin_lock(&np->lock);
1273			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1274				int entry = np->dirty_tx % TX_RING_SIZE;
1275				struct sk_buff *skb;
1276				if (!(le32_to_cpu(np->tx_ring[entry].status)
1277							& 0x00010000))
1278					break;
1279				skb = np->tx_skbuff[entry];
1280				/* Free the original skb. */
1281				dma_unmap_single(&np->pci_dev->dev,
1282					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1283					skb->len, DMA_TO_DEVICE);
1284				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1285				np->tx_skbuff[entry] = NULL;
1286				np->tx_ring[entry].frag[0].addr = 0;
1287				np->tx_ring[entry].frag[0].length = 0;
1288			}
1289			spin_unlock(&np->lock);
1290		}
1291
1292		if (netif_queue_stopped(dev) &&
1293			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1294			/* The ring is no longer full, clear busy flag. */
1295			netif_wake_queue (dev);
1296		}
1297		/* Abnormal error summary/uncommon events handlers. */
1298		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1299			netdev_error(dev, intr_status);
1300	} while (0);
1301	if (netif_msg_intr(np))
1302		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1303			   dev->name, ioread16(ioaddr + IntrStatus));
1304	return IRQ_RETVAL(handled);
1305}
1306
1307static void rx_poll(unsigned long data)
1308{
1309	struct net_device *dev = (struct net_device *)data;
1310	struct netdev_private *np = netdev_priv(dev);
1311	int entry = np->cur_rx % RX_RING_SIZE;
1312	int boguscnt = np->budget;
1313	void __iomem *ioaddr = np->base;
1314	int received = 0;
1315
1316	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1317	while (1) {
1318		struct netdev_desc *desc = &(np->rx_ring[entry]);
1319		u32 frame_status = le32_to_cpu(desc->status);
1320		int pkt_len;
1321
1322		if (--boguscnt < 0) {
1323			goto not_done;
1324		}
1325		if (!(frame_status & DescOwn))
1326			break;
1327		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1328		if (netif_msg_rx_status(np))
1329			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1330				   frame_status);
1331		if (frame_status & 0x001f4000) {
1332			/* There was an error. */
1333			if (netif_msg_rx_err(np))
1334				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1335					   frame_status);
1336			dev->stats.rx_errors++;
1337			if (frame_status & 0x00100000)
1338				dev->stats.rx_length_errors++;
1339			if (frame_status & 0x00010000)
1340				dev->stats.rx_fifo_errors++;
1341			if (frame_status & 0x00060000)
1342				dev->stats.rx_frame_errors++;
1343			if (frame_status & 0x00080000)
1344				dev->stats.rx_crc_errors++;
1345			if (frame_status & 0x00100000) {
1346				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1347					   " status %8.8x.\n",
1348					   dev->name, frame_status);
1349			}
1350		} else {
1351			struct sk_buff *skb;
1352#ifndef final_version
1353			if (netif_msg_rx_status(np))
1354				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1355					   ", bogus_cnt %d.\n",
1356					   pkt_len, boguscnt);
1357#endif
1358			/* Check if the packet is long enough to accept without copying
1359			   to a minimally-sized skbuff. */
1360			if (pkt_len < rx_copybreak &&
1361			    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1362				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1363				dma_sync_single_for_cpu(&np->pci_dev->dev,
1364						le32_to_cpu(desc->frag[0].addr),
1365						np->rx_buf_sz, DMA_FROM_DEVICE);
1366				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1367				dma_sync_single_for_device(&np->pci_dev->dev,
1368						le32_to_cpu(desc->frag[0].addr),
1369						np->rx_buf_sz, DMA_FROM_DEVICE);
1370				skb_put(skb, pkt_len);
1371			} else {
1372				dma_unmap_single(&np->pci_dev->dev,
1373					le32_to_cpu(desc->frag[0].addr),
1374					np->rx_buf_sz, DMA_FROM_DEVICE);
1375				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1376				np->rx_skbuff[entry] = NULL;
1377			}
1378			skb->protocol = eth_type_trans(skb, dev);
1379			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1380			netif_rx(skb);
1381		}
1382		entry = (entry + 1) % RX_RING_SIZE;
1383		received++;
1384	}
1385	np->cur_rx = entry;
1386	refill_rx (dev);
1387	np->budget -= received;
1388	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1389	return;
1390
1391not_done:
1392	np->cur_rx = entry;
1393	refill_rx (dev);
1394	if (!received)
1395		received = 1;
1396	np->budget -= received;
1397	if (np->budget <= 0)
1398		np->budget = RX_BUDGET;
1399	tasklet_schedule(&np->rx_tasklet);
1400}
1401
1402static void refill_rx (struct net_device *dev)
1403{
1404	struct netdev_private *np = netdev_priv(dev);
1405	int entry;
1406	int cnt = 0;
1407
1408	/* Refill the Rx ring buffers. */
1409	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1410		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1411		struct sk_buff *skb;
1412		entry = np->dirty_rx % RX_RING_SIZE;
1413		if (np->rx_skbuff[entry] == NULL) {
1414			skb = dev_alloc_skb(np->rx_buf_sz + 2);
1415			np->rx_skbuff[entry] = skb;
1416			if (skb == NULL)
1417				break;		/* Better luck next round. */
1418			skb->dev = dev;		/* Mark as being used by this device. */
1419			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1420			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1421				dma_map_single(&np->pci_dev->dev, skb->data,
1422					np->rx_buf_sz, DMA_FROM_DEVICE));
1423			if (dma_mapping_error(&np->pci_dev->dev,
1424				    np->rx_ring[entry].frag[0].addr)) {
1425			    dev_kfree_skb_irq(skb);
1426			    np->rx_skbuff[entry] = NULL;
1427			    break;
1428			}
1429		}
1430		/* Perhaps we need not reset this field. */
1431		np->rx_ring[entry].frag[0].length =
1432			cpu_to_le32(np->rx_buf_sz | LastFrag);
1433		np->rx_ring[entry].status = 0;
1434		cnt++;
1435	}
1436}
1437static void netdev_error(struct net_device *dev, int intr_status)
1438{
1439	struct netdev_private *np = netdev_priv(dev);
1440	void __iomem *ioaddr = np->base;
1441	u16 mii_ctl, mii_advertise, mii_lpa;
1442	int speed;
1443
1444	if (intr_status & LinkChange) {
1445		if (mdio_wait_link(dev, 10) == 0) {
1446			printk(KERN_INFO "%s: Link up\n", dev->name);
1447			if (np->an_enable) {
1448				mii_advertise = mdio_read(dev, np->phys[0],
1449							   MII_ADVERTISE);
1450				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1451				mii_advertise &= mii_lpa;
1452				printk(KERN_INFO "%s: Link changed: ",
1453					dev->name);
1454				if (mii_advertise & ADVERTISE_100FULL) {
1455					np->speed = 100;
1456					printk("100Mbps, full duplex\n");
1457				} else if (mii_advertise & ADVERTISE_100HALF) {
1458					np->speed = 100;
1459					printk("100Mbps, half duplex\n");
1460				} else if (mii_advertise & ADVERTISE_10FULL) {
1461					np->speed = 10;
1462					printk("10Mbps, full duplex\n");
1463				} else if (mii_advertise & ADVERTISE_10HALF) {
1464					np->speed = 10;
1465					printk("10Mbps, half duplex\n");
1466				} else
1467					printk("\n");
1468
1469			} else {
1470				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1471				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1472				np->speed = speed;
1473				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1474					dev->name, speed);
1475				printk("%s duplex.\n",
1476					(mii_ctl & BMCR_FULLDPLX) ?
1477						"full" : "half");
1478			}
1479			check_duplex(dev);
1480			if (np->flowctrl && np->mii_if.full_duplex) {
1481				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1482					ioaddr + MulticastFilter1+2);
1483				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1484					ioaddr + MACCtrl0);
1485			}
1486			netif_carrier_on(dev);
1487		} else {
1488			printk(KERN_INFO "%s: Link down\n", dev->name);
1489			netif_carrier_off(dev);
1490		}
1491	}
1492	if (intr_status & StatsMax) {
1493		get_stats(dev);
1494	}
1495	if (intr_status & IntrPCIErr) {
1496		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1497			   dev->name, intr_status);
1498		/* We must do a global reset of DMA to continue. */
1499	}
1500}
1501
1502static struct net_device_stats *get_stats(struct net_device *dev)
1503{
1504	struct netdev_private *np = netdev_priv(dev);
1505	void __iomem *ioaddr = np->base;
1506	unsigned long flags;
1507	u8 late_coll, single_coll, mult_coll;
1508
1509	spin_lock_irqsave(&np->statlock, flags);
1510	/* The chip only needs to report frames it silently dropped. */
1511	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1512	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1513	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1514	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1515
1516	mult_coll = ioread8(ioaddr + StatsMultiColl);
1517	np->xstats.tx_multiple_collisions += mult_coll;
1518	single_coll = ioread8(ioaddr + StatsOneColl);
1519	np->xstats.tx_single_collisions += single_coll;
1520	late_coll = ioread8(ioaddr + StatsLateColl);
1521	np->xstats.tx_late_collisions += late_coll;
1522	dev->stats.collisions += mult_coll
1523		+ single_coll
1524		+ late_coll;
1525
1526	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1527	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1528	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1529	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1530	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1531	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1532	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1533
1534	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1535	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1536	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1537	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1538
1539	spin_unlock_irqrestore(&np->statlock, flags);
1540
1541	return &dev->stats;
1542}
1543
1544static void set_rx_mode(struct net_device *dev)
1545{
1546	struct netdev_private *np = netdev_priv(dev);
1547	void __iomem *ioaddr = np->base;
1548	u16 mc_filter[4];			/* Multicast hash filter */
1549	u32 rx_mode;
1550	int i;
1551
1552	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1553		memset(mc_filter, 0xff, sizeof(mc_filter));
1554		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1555	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1556		   (dev->flags & IFF_ALLMULTI)) {
1557		/* Too many to match, or accept all multicasts. */
1558		memset(mc_filter, 0xff, sizeof(mc_filter));
1559		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1560	} else if (!netdev_mc_empty(dev)) {
1561		struct netdev_hw_addr *ha;
1562		int bit;
1563		int index;
1564		int crc;
1565		memset (mc_filter, 0, sizeof (mc_filter));
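		/* Hash each address: the top 6 bits of its little-endian CRC
		   (MSB first) select one of the 64 filter bits set below. */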
1566		netdev_for_each_mc_addr(ha, dev) {
1567			crc = ether_crc_le(ETH_ALEN, ha->addr);
1568			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1569				if (crc & 0x80000000) index |= 1 << bit;
1570			mc_filter[index/16] |= (1 << (index % 16));
1571		}
1572		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1573	} else {
1574		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1575		return;
1576	}
1577	if (np->mii_if.full_duplex && np->flowctrl)
1578		mc_filter[3] |= 0x0200;
1579
1580	for (i = 0; i < 4; i++)
1581		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1582	iowrite8(rx_mode, ioaddr + RxMode);
1583}
1584
1585static int __set_mac_addr(struct net_device *dev)
1586{
1587	struct netdev_private *np = netdev_priv(dev);
1588	u16 addr16;
1589
1590	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1591	iowrite16(addr16, np->base + StationAddr);
1592	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1593	iowrite16(addr16, np->base + StationAddr+2);
1594	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1595	iowrite16(addr16, np->base + StationAddr+4);
1596	return 0;
1597}
1598
1599/* Invoked with rtnl_lock held */
1600static int sundance_set_mac_addr(struct net_device *dev, void *data)
1601{
1602	const struct sockaddr *addr = data;
1603
1604	if (!is_valid_ether_addr(addr->sa_data))
1605		return -EINVAL;
1606	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1607	__set_mac_addr(dev);
1608
1609	return 0;
1610}
1611
1612static const struct {
1613	const char name[ETH_GSTRING_LEN];
1614} sundance_stats[] = {
1615	{ "tx_multiple_collisions" },
1616	{ "tx_single_collisions" },
1617	{ "tx_late_collisions" },
1618	{ "tx_deferred" },
1619	{ "tx_deferred_excessive" },
1620	{ "tx_aborted" },
1621	{ "tx_bcasts" },
1622	{ "rx_bcasts" },
1623	{ "tx_mcasts" },
1624	{ "rx_mcasts" },
1625};
1626
1627static int check_if_running(struct net_device *dev)
1628{
1629	if (!netif_running(dev))
1630		return -EINVAL;
1631	return 0;
1632}
1633
1634static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1635{
1636	struct netdev_private *np = netdev_priv(dev);
1637	strcpy(info->driver, DRV_NAME);
1638	strcpy(info->version, DRV_VERSION);
1639	strcpy(info->bus_info, pci_name(np->pci_dev));
1640}
1641
1642static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1643{
1644	struct netdev_private *np = netdev_priv(dev);
1645	spin_lock_irq(&np->lock);
1646	mii_ethtool_gset(&np->mii_if, ecmd);
1647	spin_unlock_irq(&np->lock);
1648	return 0;
1649}
1650
1651static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1652{
1653	struct netdev_private *np = netdev_priv(dev);
1654	int res;
1655	spin_lock_irq(&np->lock);
1656	res = mii_ethtool_sset(&np->mii_if, ecmd);
1657	spin_unlock_irq(&np->lock);
1658	return res;
1659}
1660
1661static int nway_reset(struct net_device *dev)
1662{
1663	struct netdev_private *np = netdev_priv(dev);
1664	return mii_nway_restart(&np->mii_if);
1665}
1666
1667static u32 get_link(struct net_device *dev)
1668{
1669	struct netdev_private *np = netdev_priv(dev);
1670	return mii_link_ok(&np->mii_if);
1671}
1672
1673static u32 get_msglevel(struct net_device *dev)
1674{
1675	struct netdev_private *np = netdev_priv(dev);
1676	return np->msg_enable;
1677}
1678
1679static void set_msglevel(struct net_device *dev, u32 val)
1680{
1681	struct netdev_private *np = netdev_priv(dev);
1682	np->msg_enable = val;
1683}
1684
1685static void get_strings(struct net_device *dev, u32 stringset,
1686		u8 *data)
1687{
1688	if (stringset == ETH_SS_STATS)
1689		memcpy(data, sundance_stats, sizeof(sundance_stats));
1690}
1691
1692static int get_sset_count(struct net_device *dev, int sset)
1693{
1694	switch (sset) {
1695	case ETH_SS_STATS:
1696		return ARRAY_SIZE(sundance_stats);
1697	default:
1698		return -EOPNOTSUPP;
1699	}
1700}
1701
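/* Refresh the counters from the chip via get_stats() and report them in the
   same order as the strings in sundance_stats[]. */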
1702static void get_ethtool_stats(struct net_device *dev,
1703		struct ethtool_stats *stats, u64 *data)
1704{
1705	struct netdev_private *np = netdev_priv(dev);
1706	int i = 0;
1707
1708	get_stats(dev);
1709	data[i++] = np->xstats.tx_multiple_collisions;
1710	data[i++] = np->xstats.tx_single_collisions;
1711	data[i++] = np->xstats.tx_late_collisions;
1712	data[i++] = np->xstats.tx_deferred;
1713	data[i++] = np->xstats.tx_deferred_excessive;
1714	data[i++] = np->xstats.tx_aborted;
1715	data[i++] = np->xstats.tx_bcasts;
1716	data[i++] = np->xstats.rx_bcasts;
1717	data[i++] = np->xstats.tx_mcasts;
1718	data[i++] = np->xstats.rx_mcasts;
1719}
1720
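/*
 * These operations back the userspace ethtool interface.  For example,
 * "ethtool -S ethX" walks get_sset_count()/get_strings()/get_ethtool_stats(),
 * and "ethtool -s ethX speed 100 duplex full" ends up in set_settings(),
 * which applies the change via mii_ethtool_sset().
 */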
1721static const struct ethtool_ops ethtool_ops = {
1722	.begin = check_if_running,
1723	.get_drvinfo = get_drvinfo,
1724	.get_settings = get_settings,
1725	.set_settings = set_settings,
1726	.nway_reset = nway_reset,
1727	.get_link = get_link,
1728	.get_msglevel = get_msglevel,
1729	.set_msglevel = set_msglevel,
1730	.get_strings = get_strings,
1731	.get_sset_count = get_sset_count,
1732	.get_ethtool_stats = get_ethtool_stats,
1733};
1734
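/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are handed to the
   generic MII library under np->lock. */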
1735static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1736{
1737	struct netdev_private *np = netdev_priv(dev);
1738	int rc;
1739
1740	if (!netif_running(dev))
1741		return -EINVAL;
1742
1743	spin_lock_irq(&np->lock);
1744	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1745	spin_unlock_irq(&np->lock);
1746
1747	return rc;
1748}
1749
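/*
 * Bring the interface down: stop the tasklets and the transmit queue, mask
 * interrupts, halt the DMA engines and the MAC, reset the chip, then free
 * the IRQ, the timer and every buffer still on the Rx and Tx rings.
 */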
1750static int netdev_close(struct net_device *dev)
1751{
1752	struct netdev_private *np = netdev_priv(dev);
1753	void __iomem *ioaddr = np->base;
1754	struct sk_buff *skb;
1755	int i;
1756
1757	/* Wait for and kill the Rx and Tx tasklets */
1758	tasklet_kill(&np->rx_tasklet);
1759	tasklet_kill(&np->tx_tasklet);
1760	np->cur_tx = 0;
1761	np->dirty_tx = 0;
1762	np->cur_task = 0;
1763	np->last_tx = NULL;
1764
1765	netif_stop_queue(dev);
1766
1767	if (netif_msg_ifdown(np)) {
1768		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1769			   "Rx %4.4x Int %2.2x.\n",
1770			   dev->name, ioread8(ioaddr + TxStatus),
1771			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1772		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1773			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1774	}
1775
1776	/* Disable interrupts by clearing the interrupt mask. */
1777	iowrite16(0x0000, ioaddr + IntrEnable);
1778
1779	/* Disable Rx and Tx DMA so resources can be released safely */
1780	iowrite32(0x500, ioaddr + DMACtrl);
1781
1782	/* Stop the chip's Tx and Rx processes. */
1783	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1784
1785	for (i = 2000; i > 0; i--) {
1786		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1787			break;
1788		mdelay(1);
1789	}
1790
1791	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1792			ioaddr + ASIC_HI_WORD(ASICCtrl));
1793
1794	for (i = 2000; i > 0; i--) {
1795		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1796			break;
1797		mdelay(1);
1798	}
1799
1800#ifdef __i386__
1801	if (netif_msg_hw(np)) {
1802		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1803			   (int)(np->tx_ring_dma));
1804		for (i = 0; i < TX_RING_SIZE; i++)
1805			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1806				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1807				   np->tx_ring[i].frag[0].length);
1808		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1809			   (int)(np->rx_ring_dma));
1810		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1811			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1812				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1813				   np->rx_ring[i].frag[0].length);
1814		}
1815	}
1816#endif /* __i386__ debugging only */
1817
1818	free_irq(dev->irq, dev);
1819
1820	del_timer_sync(&np->timer);
1821
1822	/* Free all the skbuffs in the Rx queue. */
1823	for (i = 0; i < RX_RING_SIZE; i++) {
1824		np->rx_ring[i].status = 0;
1825		skb = np->rx_skbuff[i];
1826		if (skb) {
1827			dma_unmap_single(&np->pci_dev->dev,
1828				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1829				np->rx_buf_sz, DMA_FROM_DEVICE);
1830			dev_kfree_skb(skb);
1831			np->rx_skbuff[i] = NULL;
1832		}
1833		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1834	}
1835	for (i = 0; i < TX_RING_SIZE; i++) {
1836		np->tx_ring[i].next_desc = 0;
1837		skb = np->tx_skbuff[i];
1838		if (skb) {
1839			dma_unmap_single(&np->pci_dev->dev,
1840				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1841				skb->len, DMA_TO_DEVICE);
1842			dev_kfree_skb(skb);
1843			np->tx_skbuff[i] = NULL;
1844		}
1845	}
1846
1847	return 0;
1848}
1849
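/* Undo sundance_probe1(): unregister the netdev, free the descriptor rings,
   unmap the registers and release the PCI resources. */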
1850static void __devexit sundance_remove1 (struct pci_dev *pdev)
1851{
1852	struct net_device *dev = pci_get_drvdata(pdev);
1853
1854	if (dev) {
1855		struct netdev_private *np = netdev_priv(dev);
1856		unregister_netdev(dev);
1857		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1858			np->rx_ring, np->rx_ring_dma);
1859		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1860			np->tx_ring, np->tx_ring_dma);
1861		pci_iounmap(pdev, np->base);
1862		pci_release_regions(pdev);
1863		free_netdev(dev);
1864		pci_set_drvdata(pdev, NULL);
1865	}
1866}
1867
1868#ifdef CONFIG_PM
1869
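/* Legacy PCI power-management hooks: suspend closes the interface and moves
   the device to the requested low-power state; resume restores full power
   and reopens the interface. */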
1870static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1871{
1872	struct net_device *dev = pci_get_drvdata(pci_dev);
1873
1874	if (!netif_running(dev))
1875		return 0;
1876
1877	netdev_close(dev);
1878	netif_device_detach(dev);
1879
1880	pci_save_state(pci_dev);
1881	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1882
1883	return 0;
1884}
1885
1886static int sundance_resume(struct pci_dev *pci_dev)
1887{
1888	struct net_device *dev = pci_get_drvdata(pci_dev);
1889	int err = 0;
1890
1891	if (!netif_running(dev))
1892		return 0;
1893
1894	pci_set_power_state(pci_dev, PCI_D0);
1895	pci_restore_state(pci_dev);
1896
1897	err = netdev_open(dev);
1898	if (err) {
1899		printk(KERN_ERR "%s: Can't resume interface!\n",
1900				dev->name);
1901		goto out;
1902	}
1903
1904	netif_device_attach(dev);
1905
1906out:
1907	return err;
1908}
1909
1910#endif /* CONFIG_PM */
1911
1912static struct pci_driver sundance_driver = {
1913	.name		= DRV_NAME,
1914	.id_table	= sundance_pci_tbl,
1915	.probe		= sundance_probe1,
1916	.remove		= __devexit_p(sundance_remove1),
1917#ifdef CONFIG_PM
1918	.suspend	= sundance_suspend,
1919	.resume		= sundance_resume,
1920#endif /* CONFIG_PM */
1921};
1922
1923static int __init sundance_init(void)
1924{
1925/* When built as a module, this banner is printed whether or not any devices are found during probe. */
1926#ifdef MODULE
1927	printk(version);
1928#endif
1929	return pci_register_driver(&sundance_driver);
1930}
1931
1932static void __exit sundance_exit(void)
1933{
1934	pci_unregister_driver(&sundance_driver);
1935}
1936
1937module_init(sundance_init);
1938module_exit(sundance_exit);
1939
1940