   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	https://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26
  27/* The user-configurable values.
  28   These may be modified when a driver module is loaded.*/
  29static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  30/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  31   Typical is a 64 element hash table based on the Ethernet CRC.  */
  32static const int multicast_filter_limit = 32;
  33
  34/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  35   Setting to > 1518 effectively disables this feature.
  36   This chip can receive into offset buffers, so the Alpha does not
  37   need a copy-align. */
  38static int rx_copybreak;
  39static int flowctrl=1;
  40
  41/* media[] specifies the media type the NIC operates at.
  42		 autosense	Autosensing active media.
  43		 10mbps_hd 	10Mbps half duplex.
  44		 10mbps_fd 	10Mbps full duplex.
  45		 100mbps_hd 	100Mbps half duplex.
  46		 100mbps_fd 	100Mbps full duplex.
  47		 0		Autosensing active media.
  48		 1	 	10Mbps half duplex.
  49		 2	 	10Mbps full duplex.
  50		 3	 	100Mbps half duplex.
  51		 4	 	100Mbps full duplex.
  52*/
  53#define MAX_UNITS 8
  54static char *media[MAX_UNITS];
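/* For illustration only, a hypothetical module load that forces the first
   card to 100 Mbps full duplex, leaves the second autosensing and raises
   the log level might be:
	modprobe sundance media=100mbps_fd,autosense debug=2
   (parameter names as registered with module_param*() below). */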
  55
  56
  57/* Operational parameters that are set at compile time. */
  58
  59/* Keep the ring sizes a power of two for compile efficiency.
  60   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  61   Making the Tx ring too large decreases the effectiveness of channel
  62   bonding and packet priority, and more than 128 requires modifying the
  63   Tx error recovery.
  64   Large receive rings merely waste memory. */
  65#define TX_RING_SIZE	32
  66#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  67#define RX_RING_SIZE	64
  68#define RX_BUDGET	32
   69#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
   70#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
  71
  72/* Operational parameters that usually are not changed. */
  73/* Time in jiffies before concluding the transmitter is hung. */
  74#define TX_TIMEOUT  (4*HZ)
  75#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  76
  77/* Include files, designed to support most kernel versions 2.0.0 and later. */
  78#include <linux/module.h>
  79#include <linux/kernel.h>
  80#include <linux/string.h>
  81#include <linux/timer.h>
  82#include <linux/errno.h>
  83#include <linux/ioport.h>
  84#include <linux/interrupt.h>
  85#include <linux/pci.h>
  86#include <linux/netdevice.h>
  87#include <linux/etherdevice.h>
  88#include <linux/skbuff.h>
  89#include <linux/init.h>
  90#include <linux/bitops.h>
  91#include <linux/uaccess.h>
  92#include <asm/processor.h>		/* Processor type for cache alignment. */
  93#include <asm/io.h>
  94#include <linux/delay.h>
  95#include <linux/spinlock.h>
  96#include <linux/dma-mapping.h>
  97#include <linux/crc32.h>
  98#include <linux/ethtool.h>
  99#include <linux/mii.h>
 100
 101MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 102MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 103MODULE_LICENSE("GPL");
 104
 105module_param(debug, int, 0);
 106module_param(rx_copybreak, int, 0);
 107module_param_array(media, charp, NULL, 0);
 108module_param(flowctrl, int, 0);
 109MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 110MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 111MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 112
 113/*
 114				Theory of Operation
 115
 116I. Board Compatibility
 117
 118This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 119
 120II. Board-specific settings
 121
 122III. Driver operation
 123
 124IIIa. Ring buffers
 125
 126This driver uses two statically allocated fixed-size descriptor lists
 127formed into rings by a branch from the final descriptor to the beginning of
 128the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 129Some chips explicitly use only 2^N sized rings, while others use a
 130'next descriptor' pointer that the driver forms into rings.
 131
 132IIIb/c. Transmit/Receive Structure
 133
 134This driver uses a zero-copy receive and transmit scheme.
 135The driver allocates full frame size skbuffs for the Rx ring buffers at
 136open() time and passes the skb->data field to the chip as receive data
 137buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 138a fresh skbuff is allocated and the frame is copied to the new skbuff.
 139When the incoming frame is larger, the skbuff is passed directly up the
 140protocol stack.  Buffers consumed this way are replaced by newly allocated
 141skbuffs in a later phase of receives.
 142
 143The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 144using a full-sized skbuff for small frames vs. the copying costs of larger
 145frames.  New boards are typically used in generously configured machines
 146and the underfilled buffers have negligible impact compared to the benefit of
 147a single allocation size, so the default value of zero results in never
 148copying packets.  When copying is done, the cost is usually mitigated by using
 149a combined copy/checksum routine.  Copying also preloads the cache, which is
 150most useful with small frames.
 151
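As a sketch of the scheme above (it mirrors rx_poll() further down), the
receive path effectively does:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		copy the frame into the small skbuff;
	else
		unmap the full-sized ring buffer and pass it up unchanged;
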
 152A subtle aspect of the operation is that the IP header at offset 14 in an
 153ethernet frame isn't longword aligned for further processing.
 154Unaligned buffers are permitted by the Sundance hardware, so
 155frames are received into the skbuff at an offset of "+2", 16-byte aligning
 156the IP header.
 157
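Concretely, init_ring() and refill_rx() below allocate each Rx buffer as

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);

so the 14-byte Ethernet header ends on a 16-byte boundary and the IP header
that follows it is longword aligned.
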
 158IIId. Synchronization
 159
 160The driver runs as two independent, single-threaded flows of control.  One
 161is the send-packet routine, which enforces single-threaded use by the
 162dev->tbusy flag.  The other thread is the interrupt handler, which is single
 163threaded by the hardware and interrupt handling software.
 164
 165The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 166flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 167queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 168the 'lp->tx_full' flag.
 169
 170The interrupt handler has exclusive control over the Rx ring and records stats
 171from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 172empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 173clears both the tx_full and tbusy flags.
 174
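(In the current code the historical dev->tbusy handshake is expressed with
netif_stop_queue()/netif_wake_queue(): start_tx() stops the queue once
cur_tx - dirty_tx reaches TX_QUEUE_LEN - 1, and the interrupt handler wakes
it again once the count drops below TX_QUEUE_LEN - 4.)
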
 175IV. Notes
 176
 177IVb. References
 178
 179The Sundance ST201 datasheet, preliminary version.
 180The Kendin KS8723 datasheet, preliminary version.
 181The ICplus IP100 datasheet, preliminary version.
 182http://www.scyld.com/expert/100mbps.html
 183http://www.scyld.com/expert/NWay.html
 184
 185IVc. Errata
 186
 187*/
 188
 189/* Work-around for Kendin chip bugs. */
 190#ifndef CONFIG_SUNDANCE_MMIO
 191#define USE_IO_OPS 1
 192#endif
 193
 194static const struct pci_device_id sundance_pci_tbl[] = {
 195	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 196	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 197	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 198	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 199	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 200	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 201	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 202	{ }
 203};
 204MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 205
 206enum {
 207	netdev_io_size = 128
 208};
 209
 210struct pci_id_info {
 211        const char *name;
 212};
 213static const struct pci_id_info pci_id_tbl[] = {
 214	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 215	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 216	{"D-Link DFE-580TX 4 port Server Adapter"},
 217	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 218	{"D-Link DL10050-based FAST Ethernet Adapter"},
 219	{"Sundance Technology Alta"},
 220	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 221	{ }	/* terminate list. */
 222};
 223
  224/* This driver was written to use PCI memory space; however, x86-oriented
 225   hardware often uses I/O space accesses. */
 226
 227/* Offsets to the device registers.
 228   Unlike software-only systems, device drivers interact with complex hardware.
 229   It's not useful to define symbolic names for every register bit in the
 230   device.  The name can only partially document the semantics and make
 231   the driver longer and more difficult to read.
 232   In general, only the important configuration values or bits changed
 233   multiple times should be defined symbolically.
 234*/
 235enum alta_offsets {
 236	DMACtrl = 0x00,
 237	TxListPtr = 0x04,
 238	TxDMABurstThresh = 0x08,
 239	TxDMAUrgentThresh = 0x09,
 240	TxDMAPollPeriod = 0x0a,
 241	RxDMAStatus = 0x0c,
 242	RxListPtr = 0x10,
 243	DebugCtrl0 = 0x1a,
 244	DebugCtrl1 = 0x1c,
 245	RxDMABurstThresh = 0x14,
 246	RxDMAUrgentThresh = 0x15,
 247	RxDMAPollPeriod = 0x16,
 248	LEDCtrl = 0x1a,
 249	ASICCtrl = 0x30,
 250	EEData = 0x34,
 251	EECtrl = 0x36,
 252	FlashAddr = 0x40,
 253	FlashData = 0x44,
 254	WakeEvent = 0x45,
 255	TxStatus = 0x46,
 256	TxFrameId = 0x47,
 257	DownCounter = 0x18,
 258	IntrClear = 0x4a,
 259	IntrEnable = 0x4c,
 260	IntrStatus = 0x4e,
 261	MACCtrl0 = 0x50,
 262	MACCtrl1 = 0x52,
 263	StationAddr = 0x54,
 264	MaxFrameSize = 0x5A,
 265	RxMode = 0x5c,
 266	MIICtrl = 0x5e,
 267	MulticastFilter0 = 0x60,
 268	MulticastFilter1 = 0x64,
 269	RxOctetsLow = 0x68,
 270	RxOctetsHigh = 0x6a,
 271	TxOctetsLow = 0x6c,
 272	TxOctetsHigh = 0x6e,
 273	TxFramesOK = 0x70,
 274	RxFramesOK = 0x72,
 275	StatsCarrierError = 0x74,
 276	StatsLateColl = 0x75,
 277	StatsMultiColl = 0x76,
 278	StatsOneColl = 0x77,
 279	StatsTxDefer = 0x78,
 280	RxMissed = 0x79,
 281	StatsTxXSDefer = 0x7a,
 282	StatsTxAbort = 0x7b,
 283	StatsBcastTx = 0x7c,
 284	StatsBcastRx = 0x7d,
 285	StatsMcastTx = 0x7e,
 286	StatsMcastRx = 0x7f,
 287	/* Aliased and bogus values! */
 288	RxStatus = 0x0c,
 289};
 290
 291#define ASIC_HI_WORD(x)	((x) + 2)
 292
 293enum ASICCtrl_HiWord_bit {
 294	GlobalReset = 0x0001,
 295	RxReset = 0x0002,
 296	TxReset = 0x0004,
 297	DMAReset = 0x0008,
 298	FIFOReset = 0x0010,
 299	NetworkReset = 0x0020,
 300	HostReset = 0x0040,
 301	ResetBusy = 0x0400,
 302};
 303
 304/* Bits in the interrupt status/mask registers. */
 305enum intr_status_bits {
 306	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 307	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 308	IntrDrvRqst=0x0040,
 309	StatsMax=0x0080, LinkChange=0x0100,
 310	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 311};
 312
 313/* Bits in the RxMode register. */
 314enum rx_mode_bits {
 315	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 316	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 317};
 318/* Bits in MACCtrl. */
 319enum mac_ctrl0_bits {
 320	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 321	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 322};
 323enum mac_ctrl1_bits {
 324	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 325	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 326	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 327};
 328
 329/* Bits in WakeEvent register. */
 330enum wake_event_bits {
 331	WakePktEnable = 0x01,
 332	MagicPktEnable = 0x02,
 333	LinkEventEnable = 0x04,
 334	WolEnable = 0x80,
 335};
 336
 337/* The Rx and Tx buffer descriptors. */
 338/* Note that using only 32 bit fields simplifies conversion to big-endian
 339   architectures. */
 340struct netdev_desc {
 341	__le32 next_desc;
 342	__le32 status;
 343	struct desc_frag { __le32 addr, length; } frag;
 344};
 345
 346/* Bits in netdev_desc.status */
 347enum desc_status_bits {
 348	DescOwn=0x8000,
 349	DescEndPacket=0x4000,
 350	DescEndRing=0x2000,
 351	LastFrag=0x80000000,
 352	DescIntrOnTx=0x8000,
 353	DescIntrOnDMADone=0x80000000,
 354	DisableAlign = 0x00000001,
 355};
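
/*
 * For illustration (this mirrors start_tx() and tx_poll() below), a
 * single-fragment Tx descriptor is filled in as:
 *
 *	txdesc->next_desc   = 0;
 *	txdesc->status      = cpu_to_le32((entry << 2) | DisableAlign);
 *	txdesc->frag.addr   = cpu_to_le32(<DMA-mapped buffer address>);
 *	txdesc->frag.length = cpu_to_le32(skb->len | LastFrag);
 *
 * tx_poll() then chains next_desc to the following entry and sets
 * DescIntrOnTx on the last descriptor handed to the chip.
 */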
 356
 357#define PRIV_ALIGN	15 	/* Required alignment mask */
 358/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 359   within the structure. */
 360#define MII_CNT		4
 361struct netdev_private {
 362	/* Descriptor rings first for alignment. */
 363	struct netdev_desc *rx_ring;
 364	struct netdev_desc *tx_ring;
 365	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 366	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 367        dma_addr_t tx_ring_dma;
 368        dma_addr_t rx_ring_dma;
 369	struct timer_list timer;		/* Media monitoring timer. */
 370	struct net_device *ndev;		/* backpointer */
 371	/* ethtool extra stats */
 372	struct {
 373		u64 tx_multiple_collisions;
 374		u64 tx_single_collisions;
 375		u64 tx_late_collisions;
 376		u64 tx_deferred;
 377		u64 tx_deferred_excessive;
 378		u64 tx_aborted;
 379		u64 tx_bcasts;
 380		u64 rx_bcasts;
 381		u64 tx_mcasts;
 382		u64 rx_mcasts;
 383	} xstats;
 384	/* Frequently used values: keep some adjacent for cache effect. */
 385	spinlock_t lock;
 386	int msg_enable;
 387	int chip_id;
 388	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 389	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 390	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 391	unsigned int cur_tx, dirty_tx;
  392	/* These values keep track of the transceiver/media in use. */
 393	unsigned int flowctrl:1;
 394	unsigned int default_port:4;		/* Last dev->if_port value. */
 395	unsigned int an_enable:1;
 396	unsigned int speed;
 397	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
 398	struct tasklet_struct rx_tasklet;
 399	struct tasklet_struct tx_tasklet;
 400	int budget;
 401	int cur_task;
 402	/* Multicast and receive mode. */
 403	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 404	u16 mcast_filter[4];
 405	/* MII transceiver section. */
 406	struct mii_if_info mii_if;
 407	int mii_preamble_required;
 408	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 409	struct pci_dev *pci_dev;
 410	void __iomem *base;
 411	spinlock_t statlock;
 412};
 413
 414/* The station address location in the EEPROM. */
 415#define EEPROM_SA_OFFSET	0x10
 416#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 417			IntrDrvRqst | IntrTxDone | StatsMax | \
 418			LinkChange)
 419
 420static int  change_mtu(struct net_device *dev, int new_mtu);
 421static int  eeprom_read(void __iomem *ioaddr, int location);
 422static int  mdio_read(struct net_device *dev, int phy_id, int location);
 423static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 424static int  mdio_wait_link(struct net_device *dev, int wait);
 425static int  netdev_open(struct net_device *dev);
 426static void check_duplex(struct net_device *dev);
 427static void netdev_timer(struct timer_list *t);
 428static void tx_timeout(struct net_device *dev, unsigned int txqueue);
 429static void init_ring(struct net_device *dev);
 430static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 431static int reset_tx (struct net_device *dev);
 432static irqreturn_t intr_handler(int irq, void *dev_instance);
 433static void rx_poll(struct tasklet_struct *t);
 434static void tx_poll(struct tasklet_struct *t);
 435static void refill_rx (struct net_device *dev);
 436static void netdev_error(struct net_device *dev, int intr_status);
 438static void set_rx_mode(struct net_device *dev);
 439static int __set_mac_addr(struct net_device *dev);
 440static int sundance_set_mac_addr(struct net_device *dev, void *data);
 441static struct net_device_stats *get_stats(struct net_device *dev);
 442static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 443static int  netdev_close(struct net_device *dev);
 444static const struct ethtool_ops ethtool_ops;
 445
 446static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 447{
 448	struct netdev_private *np = netdev_priv(dev);
 449	void __iomem *ioaddr = np->base + ASICCtrl;
 450	int countdown;
 451
  452	/* ST201 documentation states ASICCtrl is a 32-bit register */
 453	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 454	/* ST201 documentation states reset can take up to 1 ms */
 455	countdown = 10 + 1;
 456	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 457		if (--countdown == 0) {
 458			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 459			break;
 460		}
 461		udelay(100);
 462	}
 463}
 464
 465#ifdef CONFIG_NET_POLL_CONTROLLER
 466static void sundance_poll_controller(struct net_device *dev)
 467{
 468	struct netdev_private *np = netdev_priv(dev);
 469
 470	disable_irq(np->pci_dev->irq);
 471	intr_handler(np->pci_dev->irq, dev);
 472	enable_irq(np->pci_dev->irq);
 473}
 474#endif
 475
 476static const struct net_device_ops netdev_ops = {
 477	.ndo_open		= netdev_open,
 478	.ndo_stop		= netdev_close,
 479	.ndo_start_xmit		= start_tx,
 480	.ndo_get_stats 		= get_stats,
 481	.ndo_set_rx_mode	= set_rx_mode,
 482	.ndo_eth_ioctl		= netdev_ioctl,
 483	.ndo_tx_timeout		= tx_timeout,
 484	.ndo_change_mtu		= change_mtu,
 485	.ndo_set_mac_address 	= sundance_set_mac_addr,
 486	.ndo_validate_addr	= eth_validate_addr,
 487#ifdef CONFIG_NET_POLL_CONTROLLER
 488	.ndo_poll_controller 	= sundance_poll_controller,
 489#endif
 490};
 491
 492static int sundance_probe1(struct pci_dev *pdev,
 493			   const struct pci_device_id *ent)
 494{
 495	struct net_device *dev;
 496	struct netdev_private *np;
 497	static int card_idx;
 498	int chip_idx = ent->driver_data;
 499	int irq;
 500	int i;
 501	void __iomem *ioaddr;
 502	u16 mii_ctl;
 503	void *ring_space;
 504	dma_addr_t ring_dma;
 505#ifdef USE_IO_OPS
 506	int bar = 0;
 507#else
 508	int bar = 1;
 509#endif
 510	int phy, phy_end, phy_idx = 0;
 511	__le16 addr[ETH_ALEN / 2];
 512
 513	if (pci_enable_device(pdev))
 514		return -EIO;
 515	pci_set_master(pdev);
 516
 517	irq = pdev->irq;
 518
 519	dev = alloc_etherdev(sizeof(*np));
 520	if (!dev)
 521		return -ENOMEM;
 522	SET_NETDEV_DEV(dev, &pdev->dev);
 523
 524	if (pci_request_regions(pdev, DRV_NAME))
 525		goto err_out_netdev;
 526
 527	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 528	if (!ioaddr)
 529		goto err_out_res;
 530
 531	for (i = 0; i < 3; i++)
 532		addr[i] =
 533			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 534	eth_hw_addr_set(dev, (u8 *)addr);
 535
 536	np = netdev_priv(dev);
 537	np->ndev = dev;
 538	np->base = ioaddr;
 539	np->pci_dev = pdev;
 540	np->chip_id = chip_idx;
 541	np->msg_enable = (1 << debug) - 1;
 542	spin_lock_init(&np->lock);
 543	spin_lock_init(&np->statlock);
 544	tasklet_setup(&np->rx_tasklet, rx_poll);
 545	tasklet_setup(&np->tx_tasklet, tx_poll);
 546
 547	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 548			&ring_dma, GFP_KERNEL);
 549	if (!ring_space)
 550		goto err_out_cleardev;
 551	np->tx_ring = (struct netdev_desc *)ring_space;
 552	np->tx_ring_dma = ring_dma;
 553
 554	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 555			&ring_dma, GFP_KERNEL);
 556	if (!ring_space)
 557		goto err_out_unmap_tx;
 558	np->rx_ring = (struct netdev_desc *)ring_space;
 559	np->rx_ring_dma = ring_dma;
 560
 561	np->mii_if.dev = dev;
 562	np->mii_if.mdio_read = mdio_read;
 563	np->mii_if.mdio_write = mdio_write;
 564	np->mii_if.phy_id_mask = 0x1f;
 565	np->mii_if.reg_num_mask = 0x1f;
 566
 567	/* The chip-specific entries in the device structure. */
 568	dev->netdev_ops = &netdev_ops;
 569	dev->ethtool_ops = &ethtool_ops;
 570	dev->watchdog_timeo = TX_TIMEOUT;
 571
 572	/* MTU range: 68 - 8191 */
 573	dev->min_mtu = ETH_MIN_MTU;
 574	dev->max_mtu = 8191;
 575
 576	pci_set_drvdata(pdev, dev);
 577
 578	i = register_netdev(dev);
 579	if (i)
 580		goto err_out_unmap_rx;
 581
 582	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 583	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 584	       dev->dev_addr, irq);
 585
 586	np->phys[0] = 1;		/* Default setting */
 587	np->mii_preamble_required++;
 588
 589	/*
  590	 * It seems some PHYs don't deal well with address 0 being accessed
 591	 * first
 592	 */
 593	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 594		phy = 0;
 595		phy_end = 31;
 596	} else {
 597		phy = 1;
 598		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 599	}
 600	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 601		int phyx = phy & 0x1f;
 602		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 603		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 604			np->phys[phy_idx++] = phyx;
 605			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 606			if ((mii_status & 0x0040) == 0)
 607				np->mii_preamble_required++;
 608			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 609				   "0x%4.4x advertising %4.4x.\n",
 610				   dev->name, phyx, mii_status, np->mii_if.advertising);
 611		}
 612	}
 613	np->mii_preamble_required--;
 614
 615	if (phy_idx == 0) {
 616		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 617			   dev->name, ioread32(ioaddr + ASICCtrl));
 618		goto err_out_unregister;
 619	}
 620
 621	np->mii_if.phy_id = np->phys[0];
 622
 623	/* Parse override configuration */
 624	np->an_enable = 1;
 625	if (card_idx < MAX_UNITS) {
 626		if (media[card_idx] != NULL) {
 627			np->an_enable = 0;
 628			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 629			    strcmp (media[card_idx], "4") == 0) {
 630				np->speed = 100;
 631				np->mii_if.full_duplex = 1;
 632			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 633				   strcmp (media[card_idx], "3") == 0) {
 634				np->speed = 100;
 635				np->mii_if.full_duplex = 0;
 636			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 637				   strcmp (media[card_idx], "2") == 0) {
 638				np->speed = 10;
 639				np->mii_if.full_duplex = 1;
 640			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 641				   strcmp (media[card_idx], "1") == 0) {
 642				np->speed = 10;
 643				np->mii_if.full_duplex = 0;
 644			} else {
 645				np->an_enable = 1;
 646			}
 647		}
 648		if (flowctrl == 1)
 649			np->flowctrl = 1;
 650	}
 651
 652	/* Fibre PHY? */
 653	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 654		/* Default 100Mbps Full */
 655		if (np->an_enable) {
 656			np->speed = 100;
 657			np->mii_if.full_duplex = 1;
 658			np->an_enable = 0;
 659		}
 660	}
 661	/* Reset PHY */
 662	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 663	mdelay (300);
  664	/* If flow control is enabled, we need to advertise it. */
 665	if (np->flowctrl)
 666		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 667	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 668	/* Force media type */
 669	if (!np->an_enable) {
 670		mii_ctl = 0;
 671		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 672		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 673		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 674		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 675			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 676
 677	}
 678
 679	/* Perhaps move the reset here? */
 680	/* Reset the chip to erase previous misconfiguration. */
 681	if (netif_msg_hw(np))
 682		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 683	sundance_reset(dev, 0x00ff << 16);
 684	if (netif_msg_hw(np))
 685		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 686
 687	card_idx++;
 688	return 0;
 689
 690err_out_unregister:
 691	unregister_netdev(dev);
 692err_out_unmap_rx:
 693	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 694		np->rx_ring, np->rx_ring_dma);
 695err_out_unmap_tx:
 696	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 697		np->tx_ring, np->tx_ring_dma);
 698err_out_cleardev:
 699	pci_iounmap(pdev, ioaddr);
 700err_out_res:
 701	pci_release_regions(pdev);
 702err_out_netdev:
 703	free_netdev (dev);
 704	return -ENODEV;
 705}
 706
 707static int change_mtu(struct net_device *dev, int new_mtu)
 708{
 709	if (netif_running(dev))
 710		return -EBUSY;
 711	dev->mtu = new_mtu;
 712	return 0;
 713}
 714
 715#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 716/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 717static int eeprom_read(void __iomem *ioaddr, int location)
 718{
 719	int boguscnt = 10000;		/* Typical 1900 ticks. */
 720	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 721	do {
 722		eeprom_delay(ioaddr + EECtrl);
 723		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 724			return ioread16(ioaddr + EEData);
 725		}
 726	} while (--boguscnt > 0);
 727	return 0;
 728}
 729
 730/*  MII transceiver control section.
 731	Read and write the MII registers using software-generated serial
 732	MDIO protocol.  See the MII specifications or DP83840A data sheet
 733	for details.
 734
  735	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
  736	met by back-to-back 33 MHz PCI cycles. */
 737#define mdio_delay() ioread8(mdio_addr)
 738
 739enum mii_reg_bits {
 740	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 741};
 742#define MDIO_EnbIn  (0)
 743#define MDIO_WRITE0 (MDIO_EnbOutput)
 744#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 745
 746/* Generate the preamble required for initial synchronization and
 747   a few older transceivers. */
 748static void mdio_sync(void __iomem *mdio_addr)
 749{
 750	int bits = 32;
 751
 752	/* Establish sync by sending at least 32 logic ones. */
 753	while (--bits >= 0) {
 754		iowrite8(MDIO_WRITE1, mdio_addr);
 755		mdio_delay();
 756		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 757		mdio_delay();
 758	}
 759}
 760
 761static int mdio_read(struct net_device *dev, int phy_id, int location)
 762{
 763	struct netdev_private *np = netdev_priv(dev);
 764	void __iomem *mdio_addr = np->base + MIICtrl;
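	/* 16 command bits, shifted out MSB first: two framing 1s, the MDIO
	   start pattern 01, the read opcode 10, the 5-bit PHY address and
	   the 5-bit register address. */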
 765	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 766	int i, retval = 0;
 767
 768	if (np->mii_preamble_required)
 769		mdio_sync(mdio_addr);
 770
 771	/* Shift the read command bits out. */
 772	for (i = 15; i >= 0; i--) {
 773		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 774
 775		iowrite8(dataval, mdio_addr);
 776		mdio_delay();
 777		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 778		mdio_delay();
 779	}
 780	/* Read the two transition, 16 data, and wire-idle bits. */
 781	for (i = 19; i > 0; i--) {
 782		iowrite8(MDIO_EnbIn, mdio_addr);
 783		mdio_delay();
 784		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 785		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 786		mdio_delay();
 787	}
 788	return (retval>>1) & 0xffff;
 789}
 790
 791static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 792{
 793	struct netdev_private *np = netdev_priv(dev);
 794	void __iomem *mdio_addr = np->base + MIICtrl;
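	/* 32 command bits, shifted out MSB first: start pattern 01, write
	   opcode 01, 5-bit PHY address, 5-bit register address, turnaround
	   bits 10, then the 16-bit register value. */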
 795	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 796	int i;
 797
 798	if (np->mii_preamble_required)
 799		mdio_sync(mdio_addr);
 800
 801	/* Shift the command bits out. */
 802	for (i = 31; i >= 0; i--) {
 803		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 804
 805		iowrite8(dataval, mdio_addr);
 806		mdio_delay();
 807		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 808		mdio_delay();
 809	}
 810	/* Clear out extra bits. */
 811	for (i = 2; i > 0; i--) {
 812		iowrite8(MDIO_EnbIn, mdio_addr);
 813		mdio_delay();
 814		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 815		mdio_delay();
 816	}
 817}
 818
 819static int mdio_wait_link(struct net_device *dev, int wait)
 820{
 821	int bmsr;
 822	int phy_id;
 823	struct netdev_private *np;
 824
 825	np = netdev_priv(dev);
 826	phy_id = np->phys[0];
 827
 828	do {
 829		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 830		if (bmsr & 0x0004)
 831			return 0;
 832		mdelay(1);
 833	} while (--wait > 0);
 834	return -1;
 835}
 836
 837static int netdev_open(struct net_device *dev)
 838{
 839	struct netdev_private *np = netdev_priv(dev);
 840	void __iomem *ioaddr = np->base;
 841	const int irq = np->pci_dev->irq;
 842	unsigned long flags;
 843	int i;
 844
 845	sundance_reset(dev, 0x00ff << 16);
 846
 847	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 848	if (i)
 849		return i;
 850
 851	if (netif_msg_ifup(np))
 852		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 853
 854	init_ring(dev);
 855
 856	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 857	/* The Tx list pointer is written as packets are queued. */
 858
 859	/* Initialize other registers. */
 860	__set_mac_addr(dev);
 861#if IS_ENABLED(CONFIG_VLAN_8021Q)
 862	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 863#else
 864	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 865#endif
 866	if (dev->mtu > 2047)
 867		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 868
 869	/* Configure the PCI bus bursts and FIFO thresholds. */
 870
 871	if (dev->if_port == 0)
 872		dev->if_port = np->default_port;
 873
 874	spin_lock_init(&np->mcastlock);
 875
 876	set_rx_mode(dev);
 877	iowrite16(0, ioaddr + IntrEnable);
 878	iowrite16(0, ioaddr + DownCounter);
 879	/* Set the chip to poll every N*320nsec. */
 880	iowrite8(100, ioaddr + RxDMAPollPeriod);
 881	iowrite8(127, ioaddr + TxDMAPollPeriod);
 882	/* Fix DFE-580TX packet drop issue */
 883	if (np->pci_dev->revision >= 0x14)
 884		iowrite8(0x01, ioaddr + DebugCtrl1);
 885	netif_start_queue(dev);
 886
 887	spin_lock_irqsave(&np->lock, flags);
 888	reset_tx(dev);
 889	spin_unlock_irqrestore(&np->lock, flags);
 890
 891	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 892
 893	/* Disable Wol */
 894	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 895	np->wol_enabled = 0;
 896
 897	if (netif_msg_ifup(np))
 898		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 899			   "MAC Control %x, %4.4x %4.4x.\n",
 900			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 901			   ioread32(ioaddr + MACCtrl0),
 902			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 903
 904	/* Set the timer to check for link beat. */
 905	timer_setup(&np->timer, netdev_timer, 0);
 906	np->timer.expires = jiffies + 3*HZ;
 907	add_timer(&np->timer);
 908
 909	/* Enable interrupts by setting the interrupt mask. */
 910	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 911
 912	return 0;
 913}
 914
 915static void check_duplex(struct net_device *dev)
 916{
 917	struct netdev_private *np = netdev_priv(dev);
 918	void __iomem *ioaddr = np->base;
 919	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 920	int negotiated = mii_lpa & np->mii_if.advertising;
 921	int duplex;
 922
 923	/* Force media */
 924	if (!np->an_enable || mii_lpa == 0xffff) {
 925		if (np->mii_if.full_duplex)
 926			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 927				ioaddr + MACCtrl0);
 928		return;
 929	}
 930
 931	/* Autonegotiation */
 932	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 933	if (np->mii_if.full_duplex != duplex) {
 934		np->mii_if.full_duplex = duplex;
 935		if (netif_msg_link(np))
 936			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 937				   "negotiated capability %4.4x.\n", dev->name,
 938				   duplex ? "full" : "half", np->phys[0], negotiated);
 939		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 940	}
 941}
 942
 943static void netdev_timer(struct timer_list *t)
 944{
 945	struct netdev_private *np = from_timer(np, t, timer);
 946	struct net_device *dev = np->mii_if.dev;
 947	void __iomem *ioaddr = np->base;
 948	int next_tick = 10*HZ;
 949
 950	if (netif_msg_timer(np)) {
 951		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 952			   "Tx %x Rx %x.\n",
 953			   dev->name, ioread16(ioaddr + IntrEnable),
 954			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 955	}
 956	check_duplex(dev);
 957	np->timer.expires = jiffies + next_tick;
 958	add_timer(&np->timer);
 959}
 960
 961static void tx_timeout(struct net_device *dev, unsigned int txqueue)
 962{
 963	struct netdev_private *np = netdev_priv(dev);
 964	void __iomem *ioaddr = np->base;
 965	unsigned long flag;
 966
 967	netif_stop_queue(dev);
 968	tasklet_disable_in_atomic(&np->tx_tasklet);
 969	iowrite16(0, ioaddr + IntrEnable);
 970	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 971		   "TxFrameId %2.2x,"
 972		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 973		   ioread8(ioaddr + TxFrameId));
 974
 975	{
 976		int i;
 977		for (i=0; i<TX_RING_SIZE; i++) {
 978			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 979				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 980				le32_to_cpu(np->tx_ring[i].next_desc),
 981				le32_to_cpu(np->tx_ring[i].status),
 982				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 983				le32_to_cpu(np->tx_ring[i].frag.addr),
 984				le32_to_cpu(np->tx_ring[i].frag.length));
 985		}
 986		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 987			ioread32(np->base + TxListPtr),
 988			netif_queue_stopped(dev));
 989		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 990			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 991			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 992		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
 993		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
 994	}
 995	spin_lock_irqsave(&np->lock, flag);
 996
  997	/* Stop and restart the chip's Tx processes. */
 998	reset_tx(dev);
 999	spin_unlock_irqrestore(&np->lock, flag);
1000
1001	dev->if_port = 0;
1002
1003	netif_trans_update(dev); /* prevent tx timeout */
1004	dev->stats.tx_errors++;
1005	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1006		netif_wake_queue(dev);
1007	}
1008	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1009	tasklet_enable(&np->tx_tasklet);
1010}
1011
1012
1013/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1014static void init_ring(struct net_device *dev)
1015{
1016	struct netdev_private *np = netdev_priv(dev);
1017	int i;
1018
1019	np->cur_rx = np->cur_tx = 0;
1020	np->dirty_rx = np->dirty_tx = 0;
1021	np->cur_task = 0;
1022
1023	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1024
1025	/* Initialize all Rx descriptors. */
1026	for (i = 0; i < RX_RING_SIZE; i++) {
1027		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1028			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1029		np->rx_ring[i].status = 0;
1030		np->rx_ring[i].frag.length = 0;
1031		np->rx_skbuff[i] = NULL;
1032	}
1033
1034	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1035	for (i = 0; i < RX_RING_SIZE; i++) {
1036		struct sk_buff *skb =
1037			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1038		np->rx_skbuff[i] = skb;
1039		if (skb == NULL)
1040			break;
1041		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1042		np->rx_ring[i].frag.addr = cpu_to_le32(
1043			dma_map_single(&np->pci_dev->dev, skb->data,
1044				np->rx_buf_sz, DMA_FROM_DEVICE));
1045		if (dma_mapping_error(&np->pci_dev->dev,
1046					np->rx_ring[i].frag.addr)) {
1047			dev_kfree_skb(skb);
1048			np->rx_skbuff[i] = NULL;
1049			break;
1050		}
1051		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1052	}
1053	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1054
1055	for (i = 0; i < TX_RING_SIZE; i++) {
1056		np->tx_skbuff[i] = NULL;
1057		np->tx_ring[i].status = 0;
1058	}
1059}
1060
1061static void tx_poll(struct tasklet_struct *t)
1062{
1063	struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
1064	unsigned head = np->cur_task % TX_RING_SIZE;
1065	struct netdev_desc *txdesc =
1066		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1067
1068	/* Chain the next pointer */
1069	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1070		int entry = np->cur_task % TX_RING_SIZE;
1071		txdesc = &np->tx_ring[entry];
1072		if (np->last_tx) {
1073			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1074				entry*sizeof(struct netdev_desc));
1075		}
1076		np->last_tx = txdesc;
1077	}
1078	/* Indicate the latest descriptor of tx ring */
1079	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1080
1081	if (ioread32 (np->base + TxListPtr) == 0)
1082		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1083			np->base + TxListPtr);
1084}
1085
1086static netdev_tx_t
1087start_tx (struct sk_buff *skb, struct net_device *dev)
1088{
1089	struct netdev_private *np = netdev_priv(dev);
1090	struct netdev_desc *txdesc;
1091	unsigned entry;
1092
1093	/* Calculate the next Tx descriptor entry. */
1094	entry = np->cur_tx % TX_RING_SIZE;
1095	np->tx_skbuff[entry] = skb;
1096	txdesc = &np->tx_ring[entry];
1097
1098	txdesc->next_desc = 0;
1099	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1100	txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1101				skb->data, skb->len, DMA_TO_DEVICE));
1102	if (dma_mapping_error(&np->pci_dev->dev,
1103				txdesc->frag.addr))
1104			goto drop_frame;
1105	txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
1106
1107	/* Increment cur_tx before tasklet_schedule() */
1108	np->cur_tx++;
1109	mb();
1110	/* Schedule a tx_poll() task */
1111	tasklet_schedule(&np->tx_tasklet);
1112
1113	/* On some architectures: explicitly flush cache lines here. */
1114	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1115	    !netif_queue_stopped(dev)) {
1116		/* do nothing */
1117	} else {
1118		netif_stop_queue (dev);
1119	}
1120	if (netif_msg_tx_queued(np)) {
1121		printk (KERN_DEBUG
1122			"%s: Transmit frame #%d queued in slot %d.\n",
1123			dev->name, np->cur_tx, entry);
1124	}
1125	return NETDEV_TX_OK;
1126
1127drop_frame:
1128	dev_kfree_skb_any(skb);
1129	np->tx_skbuff[entry] = NULL;
1130	dev->stats.tx_dropped++;
1131	return NETDEV_TX_OK;
1132}
1133
 1134/* Reset the hardware Tx path and free all Tx buffers */
1135static int
1136reset_tx (struct net_device *dev)
1137{
1138	struct netdev_private *np = netdev_priv(dev);
1139	void __iomem *ioaddr = np->base;
1140	struct sk_buff *skb;
1141	int i;
1142
 1143	/* Reset Tx logic; TxListPtr will be cleared */
1144	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1145	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1146
1147	/* free all tx skbuff */
1148	for (i = 0; i < TX_RING_SIZE; i++) {
1149		np->tx_ring[i].next_desc = 0;
1150
1151		skb = np->tx_skbuff[i];
1152		if (skb) {
1153			dma_unmap_single(&np->pci_dev->dev,
1154				le32_to_cpu(np->tx_ring[i].frag.addr),
1155				skb->len, DMA_TO_DEVICE);
1156			dev_kfree_skb_any(skb);
1157			np->tx_skbuff[i] = NULL;
1158			dev->stats.tx_dropped++;
1159		}
1160	}
1161	np->cur_tx = np->dirty_tx = 0;
1162	np->cur_task = 0;
1163
1164	np->last_tx = NULL;
1165	iowrite8(127, ioaddr + TxDMAPollPeriod);
1166
1167	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1168	return 0;
1169}
1170
 1171/* The interrupt handler cleans up after the Tx thread
 1172   and schedules the Rx work tasklet. */
1173static irqreturn_t intr_handler(int irq, void *dev_instance)
1174{
1175	struct net_device *dev = (struct net_device *)dev_instance;
1176	struct netdev_private *np = netdev_priv(dev);
1177	void __iomem *ioaddr = np->base;
1178	int hw_frame_id;
1179	int tx_cnt;
1180	int tx_status;
1181	int handled = 0;
1182	int i;
1183
1184	do {
1185		int intr_status = ioread16(ioaddr + IntrStatus);
1186		iowrite16(intr_status, ioaddr + IntrStatus);
1187
1188		if (netif_msg_intr(np))
1189			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1190				   dev->name, intr_status);
1191
1192		if (!(intr_status & DEFAULT_INTR))
1193			break;
1194
1195		handled = 1;
1196
1197		if (intr_status & (IntrRxDMADone)) {
1198			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1199					ioaddr + IntrEnable);
1200			if (np->budget < 0)
1201				np->budget = RX_BUDGET;
1202			tasklet_schedule(&np->rx_tasklet);
1203		}
1204		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1205			tx_status = ioread16 (ioaddr + TxStatus);
1206			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1207				if (netif_msg_tx_done(np))
1208					printk
1209					    ("%s: Transmit status is %2.2x.\n",
1210				     	dev->name, tx_status);
1211				if (tx_status & 0x1e) {
1212					if (netif_msg_tx_err(np))
1213						printk("%s: Transmit error status %4.4x.\n",
1214							   dev->name, tx_status);
1215					dev->stats.tx_errors++;
1216					if (tx_status & 0x10)
1217						dev->stats.tx_fifo_errors++;
1218					if (tx_status & 0x08)
1219						dev->stats.collisions++;
1220					if (tx_status & 0x04)
1221						dev->stats.tx_fifo_errors++;
1222					if (tx_status & 0x02)
1223						dev->stats.tx_window_errors++;
1224
1225					/*
1226					** This reset has been verified on
1227					** DFE-580TX boards ! phdm@macqel.be.
1228					*/
1229					if (tx_status & 0x10) {	/* TxUnderrun */
1230						/* Restart Tx FIFO and transmitter */
1231						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1232						/* No need to reset the Tx pointer here */
1233					}
1234					/* Restart the Tx. Need to make sure tx enabled */
1235					i = 10;
1236					do {
1237						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1238						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1239							break;
1240						mdelay(1);
1241					} while (--i);
1242				}
1243				/* Yup, this is a documentation bug.  It cost me *hours*. */
1244				iowrite16 (0, ioaddr + TxStatus);
1245				if (tx_cnt < 0) {
1246					iowrite32(5000, ioaddr + DownCounter);
1247					break;
1248				}
1249				tx_status = ioread16 (ioaddr + TxStatus);
1250			}
1251			hw_frame_id = (tx_status >> 8) & 0xff;
1252		} else 	{
1253			hw_frame_id = ioread8(ioaddr + TxFrameId);
1254		}
1255
1256		if (np->pci_dev->revision >= 0x14) {
1257			spin_lock(&np->lock);
1258			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1259				int entry = np->dirty_tx % TX_RING_SIZE;
1260				struct sk_buff *skb;
1261				int sw_frame_id;
1262				sw_frame_id = (le32_to_cpu(
1263					np->tx_ring[entry].status) >> 2) & 0xff;
1264				if (sw_frame_id == hw_frame_id &&
1265					!(le32_to_cpu(np->tx_ring[entry].status)
1266					& 0x00010000))
1267						break;
1268				if (sw_frame_id == (hw_frame_id + 1) %
1269					TX_RING_SIZE)
1270						break;
1271				skb = np->tx_skbuff[entry];
1272				/* Free the original skb. */
1273				dma_unmap_single(&np->pci_dev->dev,
1274					le32_to_cpu(np->tx_ring[entry].frag.addr),
1275					skb->len, DMA_TO_DEVICE);
1276				dev_consume_skb_irq(np->tx_skbuff[entry]);
1277				np->tx_skbuff[entry] = NULL;
1278				np->tx_ring[entry].frag.addr = 0;
1279				np->tx_ring[entry].frag.length = 0;
1280			}
1281			spin_unlock(&np->lock);
1282		} else {
1283			spin_lock(&np->lock);
1284			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1285				int entry = np->dirty_tx % TX_RING_SIZE;
1286				struct sk_buff *skb;
1287				if (!(le32_to_cpu(np->tx_ring[entry].status)
1288							& 0x00010000))
1289					break;
1290				skb = np->tx_skbuff[entry];
1291				/* Free the original skb. */
1292				dma_unmap_single(&np->pci_dev->dev,
1293					le32_to_cpu(np->tx_ring[entry].frag.addr),
1294					skb->len, DMA_TO_DEVICE);
1295				dev_consume_skb_irq(np->tx_skbuff[entry]);
1296				np->tx_skbuff[entry] = NULL;
1297				np->tx_ring[entry].frag.addr = 0;
1298				np->tx_ring[entry].frag.length = 0;
1299			}
1300			spin_unlock(&np->lock);
1301		}
1302
1303		if (netif_queue_stopped(dev) &&
1304			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1305			/* The ring is no longer full, clear busy flag. */
1306			netif_wake_queue (dev);
1307		}
1308		/* Abnormal error summary/uncommon events handlers. */
1309		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1310			netdev_error(dev, intr_status);
1311	} while (0);
1312	if (netif_msg_intr(np))
1313		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1314			   dev->name, ioread16(ioaddr + IntrStatus));
1315	return IRQ_RETVAL(handled);
1316}
1317
1318static void rx_poll(struct tasklet_struct *t)
1319{
1320	struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
1321	struct net_device *dev = np->ndev;
1322	int entry = np->cur_rx % RX_RING_SIZE;
1323	int boguscnt = np->budget;
1324	void __iomem *ioaddr = np->base;
1325	int received = 0;
1326
1327	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1328	while (1) {
1329		struct netdev_desc *desc = &(np->rx_ring[entry]);
1330		u32 frame_status = le32_to_cpu(desc->status);
1331		int pkt_len;
1332
1333		if (--boguscnt < 0) {
1334			goto not_done;
1335		}
1336		if (!(frame_status & DescOwn))
1337			break;
1338		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1339		if (netif_msg_rx_status(np))
1340			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1341				   frame_status);
1342		if (frame_status & 0x001f4000) {
 1343			/* There was an error. */
1344			if (netif_msg_rx_err(np))
1345				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1346					   frame_status);
1347			dev->stats.rx_errors++;
1348			if (frame_status & 0x00100000)
1349				dev->stats.rx_length_errors++;
1350			if (frame_status & 0x00010000)
1351				dev->stats.rx_fifo_errors++;
1352			if (frame_status & 0x00060000)
1353				dev->stats.rx_frame_errors++;
1354			if (frame_status & 0x00080000)
1355				dev->stats.rx_crc_errors++;
1356			if (frame_status & 0x00100000) {
1357				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1358					   " status %8.8x.\n",
1359					   dev->name, frame_status);
1360			}
1361		} else {
1362			struct sk_buff *skb;
1363#ifndef final_version
1364			if (netif_msg_rx_status(np))
1365				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1366					   ", bogus_cnt %d.\n",
1367					   pkt_len, boguscnt);
1368#endif
1369			/* Check if the packet is long enough to accept without copying
1370			   to a minimally-sized skbuff. */
1371			if (pkt_len < rx_copybreak &&
1372			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1373				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1374				dma_sync_single_for_cpu(&np->pci_dev->dev,
1375						le32_to_cpu(desc->frag.addr),
1376						np->rx_buf_sz, DMA_FROM_DEVICE);
1377				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1378				dma_sync_single_for_device(&np->pci_dev->dev,
1379						le32_to_cpu(desc->frag.addr),
1380						np->rx_buf_sz, DMA_FROM_DEVICE);
1381				skb_put(skb, pkt_len);
1382			} else {
1383				dma_unmap_single(&np->pci_dev->dev,
1384					le32_to_cpu(desc->frag.addr),
1385					np->rx_buf_sz, DMA_FROM_DEVICE);
1386				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1387				np->rx_skbuff[entry] = NULL;
1388			}
1389			skb->protocol = eth_type_trans(skb, dev);
1390			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1391			netif_rx(skb);
1392		}
1393		entry = (entry + 1) % RX_RING_SIZE;
1394		received++;
1395	}
1396	np->cur_rx = entry;
1397	refill_rx (dev);
1398	np->budget -= received;
1399	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1400	return;
1401
1402not_done:
1403	np->cur_rx = entry;
1404	refill_rx (dev);
1405	if (!received)
1406		received = 1;
1407	np->budget -= received;
1408	if (np->budget <= 0)
1409		np->budget = RX_BUDGET;
1410	tasklet_schedule(&np->rx_tasklet);
1411}
1412
1413static void refill_rx (struct net_device *dev)
1414{
1415	struct netdev_private *np = netdev_priv(dev);
1416	int entry;
1417
1418	/* Refill the Rx ring buffers. */
1419	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1420		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1421		struct sk_buff *skb;
1422		entry = np->dirty_rx % RX_RING_SIZE;
1423		if (np->rx_skbuff[entry] == NULL) {
1424			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1425			np->rx_skbuff[entry] = skb;
1426			if (skb == NULL)
1427				break;		/* Better luck next round. */
1428			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1429			np->rx_ring[entry].frag.addr = cpu_to_le32(
1430				dma_map_single(&np->pci_dev->dev, skb->data,
1431					np->rx_buf_sz, DMA_FROM_DEVICE));
1432			if (dma_mapping_error(&np->pci_dev->dev,
1433				    np->rx_ring[entry].frag.addr)) {
1434			    dev_kfree_skb_irq(skb);
1435			    np->rx_skbuff[entry] = NULL;
1436			    break;
1437			}
1438		}
1439		/* Perhaps we need not reset this field. */
1440		np->rx_ring[entry].frag.length =
1441			cpu_to_le32(np->rx_buf_sz | LastFrag);
1442		np->rx_ring[entry].status = 0;
1443	}
1444}
1445static void netdev_error(struct net_device *dev, int intr_status)
1446{
1447	struct netdev_private *np = netdev_priv(dev);
1448	void __iomem *ioaddr = np->base;
1449	u16 mii_ctl, mii_advertise, mii_lpa;
1450	int speed;
1451
1452	if (intr_status & LinkChange) {
1453		if (mdio_wait_link(dev, 10) == 0) {
1454			printk(KERN_INFO "%s: Link up\n", dev->name);
1455			if (np->an_enable) {
1456				mii_advertise = mdio_read(dev, np->phys[0],
1457							   MII_ADVERTISE);
1458				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1459				mii_advertise &= mii_lpa;
1460				printk(KERN_INFO "%s: Link changed: ",
1461					dev->name);
1462				if (mii_advertise & ADVERTISE_100FULL) {
1463					np->speed = 100;
1464					printk("100Mbps, full duplex\n");
1465				} else if (mii_advertise & ADVERTISE_100HALF) {
1466					np->speed = 100;
1467					printk("100Mbps, half duplex\n");
1468				} else if (mii_advertise & ADVERTISE_10FULL) {
1469					np->speed = 10;
1470					printk("10Mbps, full duplex\n");
1471				} else if (mii_advertise & ADVERTISE_10HALF) {
1472					np->speed = 10;
1473					printk("10Mbps, half duplex\n");
1474				} else
1475					printk("\n");
1476
1477			} else {
1478				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1479				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1480				np->speed = speed;
1481				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1482					dev->name, speed);
1483				printk("%s duplex.\n",
1484					(mii_ctl & BMCR_FULLDPLX) ?
1485						"full" : "half");
1486			}
1487			check_duplex(dev);
1488			if (np->flowctrl && np->mii_if.full_duplex) {
1489				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1490					ioaddr + MulticastFilter1+2);
1491				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1492					ioaddr + MACCtrl0);
1493			}
1494			netif_carrier_on(dev);
1495		} else {
1496			printk(KERN_INFO "%s: Link down\n", dev->name);
1497			netif_carrier_off(dev);
1498		}
1499	}
1500	if (intr_status & StatsMax) {
1501		get_stats(dev);
1502	}
1503	if (intr_status & IntrPCIErr) {
1504		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1505			   dev->name, intr_status);
1506		/* We must do a global reset of DMA to continue. */
1507	}
1508}
1509
1510static struct net_device_stats *get_stats(struct net_device *dev)
1511{
1512	struct netdev_private *np = netdev_priv(dev);
1513	void __iomem *ioaddr = np->base;
1514	unsigned long flags;
1515	u8 late_coll, single_coll, mult_coll;
1516
1517	spin_lock_irqsave(&np->statlock, flags);
 1518	/* The chip only needs to report frames it silently dropped. */
1519	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1520	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1521	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1522	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1523
1524	mult_coll = ioread8(ioaddr + StatsMultiColl);
1525	np->xstats.tx_multiple_collisions += mult_coll;
1526	single_coll = ioread8(ioaddr + StatsOneColl);
1527	np->xstats.tx_single_collisions += single_coll;
1528	late_coll = ioread8(ioaddr + StatsLateColl);
1529	np->xstats.tx_late_collisions += late_coll;
1530	dev->stats.collisions += mult_coll
1531		+ single_coll
1532		+ late_coll;
1533
1534	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1535	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1536	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1537	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1538	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1539	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1540	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1541
1542	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1543	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1544	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1545	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1546
1547	spin_unlock_irqrestore(&np->statlock, flags);
1548
1549	return &dev->stats;
1550}
1551
1552static void set_rx_mode(struct net_device *dev)
1553{
1554	struct netdev_private *np = netdev_priv(dev);
1555	void __iomem *ioaddr = np->base;
1556	u16 mc_filter[4];			/* Multicast hash filter */
1557	u32 rx_mode;
1558	int i;
1559
1560	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1561		memset(mc_filter, 0xff, sizeof(mc_filter));
1562		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1563	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1564		   (dev->flags & IFF_ALLMULTI)) {
1565		/* Too many to match, or accept all multicasts. */
1566		memset(mc_filter, 0xff, sizeof(mc_filter));
1567		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1568	} else if (!netdev_mc_empty(dev)) {
1569		struct netdev_hw_addr *ha;
1570		int bit;
1571		int index;
1572		int crc;
1573		memset (mc_filter, 0, sizeof (mc_filter));
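		/* Hash filter: the six most significant bits of the
		   little-endian CRC of each address select one of the 64
		   bits spread across the four 16-bit MulticastFilter
		   registers. */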
1574		netdev_for_each_mc_addr(ha, dev) {
1575			crc = ether_crc_le(ETH_ALEN, ha->addr);
1576			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1577				if (crc & 0x80000000) index |= 1 << bit;
1578			mc_filter[index/16] |= (1 << (index % 16));
1579		}
1580		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1581	} else {
1582		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1583		return;
1584	}
1585	if (np->mii_if.full_duplex && np->flowctrl)
1586		mc_filter[3] |= 0x0200;
1587
1588	for (i = 0; i < 4; i++)
1589		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1590	iowrite8(rx_mode, ioaddr + RxMode);
1591}
1592
1593static int __set_mac_addr(struct net_device *dev)
1594{
1595	struct netdev_private *np = netdev_priv(dev);
1596	u16 addr16;
1597
1598	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1599	iowrite16(addr16, np->base + StationAddr);
1600	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1601	iowrite16(addr16, np->base + StationAddr+2);
1602	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1603	iowrite16(addr16, np->base + StationAddr+4);
1604	return 0;
1605}
1606
1607/* Invoked with rtnl_lock held */
1608static int sundance_set_mac_addr(struct net_device *dev, void *data)
1609{
1610	const struct sockaddr *addr = data;
1611
1612	if (!is_valid_ether_addr(addr->sa_data))
1613		return -EADDRNOTAVAIL;
1614	eth_hw_addr_set(dev, addr->sa_data);
1615	__set_mac_addr(dev);
1616
1617	return 0;
1618}
1619
1620static const struct {
1621	const char name[ETH_GSTRING_LEN];
1622} sundance_stats[] = {
1623	{ "tx_multiple_collisions" },
1624	{ "tx_single_collisions" },
1625	{ "tx_late_collisions" },
1626	{ "tx_deferred" },
1627	{ "tx_deferred_excessive" },
1628	{ "tx_aborted" },
1629	{ "tx_bcasts" },
1630	{ "rx_bcasts" },
1631	{ "tx_mcasts" },
1632	{ "rx_mcasts" },
1633};
1634
1635static int check_if_running(struct net_device *dev)
1636{
1637	if (!netif_running(dev))
1638		return -EINVAL;
1639	return 0;
1640}
1641
1642static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1643{
1644	struct netdev_private *np = netdev_priv(dev);
1645	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1646	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1647}
1648
1649static int get_link_ksettings(struct net_device *dev,
1650			      struct ethtool_link_ksettings *cmd)
1651{
1652	struct netdev_private *np = netdev_priv(dev);
1653	spin_lock_irq(&np->lock);
1654	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1655	spin_unlock_irq(&np->lock);
1656	return 0;
1657}
1658
1659static int set_link_ksettings(struct net_device *dev,
1660			      const struct ethtool_link_ksettings *cmd)
1661{
1662	struct netdev_private *np = netdev_priv(dev);
1663	int res;
1664	spin_lock_irq(&np->lock);
1665	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1666	spin_unlock_irq(&np->lock);
1667	return res;
1668}
1669
1670static int nway_reset(struct net_device *dev)
1671{
1672	struct netdev_private *np = netdev_priv(dev);
1673	return mii_nway_restart(&np->mii_if);
1674}
1675
1676static u32 get_link(struct net_device *dev)
1677{
1678	struct netdev_private *np = netdev_priv(dev);
1679	return mii_link_ok(&np->mii_if);
1680}
1681
1682static u32 get_msglevel(struct net_device *dev)
1683{
1684	struct netdev_private *np = netdev_priv(dev);
1685	return np->msg_enable;
1686}
1687
1688static void set_msglevel(struct net_device *dev, u32 val)
1689{
1690	struct netdev_private *np = netdev_priv(dev);
1691	np->msg_enable = val;
1692}
1693
1694static void get_strings(struct net_device *dev, u32 stringset,
1695		u8 *data)
1696{
1697	if (stringset == ETH_SS_STATS)
1698		memcpy(data, sundance_stats, sizeof(sundance_stats));
1699}
1700
1701static int get_sset_count(struct net_device *dev, int sset)
1702{
1703	switch (sset) {
1704	case ETH_SS_STATS:
1705		return ARRAY_SIZE(sundance_stats);
1706	default:
1707		return -EOPNOTSUPP;
1708	}
1709}
1710
1711static void get_ethtool_stats(struct net_device *dev,
1712		struct ethtool_stats *stats, u64 *data)
1713{
1714	struct netdev_private *np = netdev_priv(dev);
1715	int i = 0;
1716
1717	get_stats(dev);
1718	data[i++] = np->xstats.tx_multiple_collisions;
1719	data[i++] = np->xstats.tx_single_collisions;
1720	data[i++] = np->xstats.tx_late_collisions;
1721	data[i++] = np->xstats.tx_deferred;
1722	data[i++] = np->xstats.tx_deferred_excessive;
1723	data[i++] = np->xstats.tx_aborted;
1724	data[i++] = np->xstats.tx_bcasts;
1725	data[i++] = np->xstats.rx_bcasts;
1726	data[i++] = np->xstats.tx_mcasts;
1727	data[i++] = np->xstats.rx_mcasts;
1728}
1729
1730#ifdef CONFIG_PM
1731
1732static void sundance_get_wol(struct net_device *dev,
1733		struct ethtool_wolinfo *wol)
1734{
1735	struct netdev_private *np = netdev_priv(dev);
1736	void __iomem *ioaddr = np->base;
1737	u8 wol_bits;
1738
1739	wol->wolopts = 0;
1740
1741	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1742	if (!np->wol_enabled)
1743		return;
1744
1745	wol_bits = ioread8(ioaddr + WakeEvent);
1746	if (wol_bits & MagicPktEnable)
1747		wol->wolopts |= WAKE_MAGIC;
1748	if (wol_bits & LinkEventEnable)
1749		wol->wolopts |= WAKE_PHY;
1750}
1751
1752static int sundance_set_wol(struct net_device *dev,
1753	struct ethtool_wolinfo *wol)
1754{
1755	struct netdev_private *np = netdev_priv(dev);
1756	void __iomem *ioaddr = np->base;
1757	u8 wol_bits;
1758
1759	if (!device_can_wakeup(&np->pci_dev->dev))
1760		return -EOPNOTSUPP;
1761
1762	np->wol_enabled = !!(wol->wolopts);
1763	wol_bits = ioread8(ioaddr + WakeEvent);
1764	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1765			LinkEventEnable | WolEnable);
1766
1767	if (np->wol_enabled) {
1768		if (wol->wolopts & WAKE_MAGIC)
1769			wol_bits |= (MagicPktEnable | WolEnable);
1770		if (wol->wolopts & WAKE_PHY)
1771			wol_bits |= (LinkEventEnable | WolEnable);
1772	}
1773	iowrite8(wol_bits, ioaddr + WakeEvent);
1774
1775	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1776
1777	return 0;
1778}
1779#else
1780#define sundance_get_wol NULL
1781#define sundance_set_wol NULL
1782#endif /* CONFIG_PM */
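
A user-space sketch (not part of the driver) of how the Wake-on-LAN hooks above are reached: an ETHTOOL_SWOL request carrying WAKE_MAGIC ends up in sundance_set_wol(), which sets MagicPktEnable and WolEnable in the WakeEvent register. The interface name "eth0" is only an assumption; this is roughly the request that "ethtool -s eth0 wol g" issues.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_MAGIC };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical interface name */
	ifr.ifr_data = (char *)&wol;

	/* hand the WoL settings to the driver's .set_wol hook */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SWOL");
	close(fd);
	return 0;
}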
1783
1784static const struct ethtool_ops ethtool_ops = {
1785	.begin = check_if_running,
1786	.get_drvinfo = get_drvinfo,
1787	.nway_reset = nway_reset,
1788	.get_link = get_link,
1789	.get_wol = sundance_get_wol,
1790	.set_wol = sundance_set_wol,
1791	.get_msglevel = get_msglevel,
1792	.set_msglevel = set_msglevel,
1793	.get_strings = get_strings,
1794	.get_sset_count = get_sset_count,
1795	.get_ethtool_stats = get_ethtool_stats,
1796	.get_link_ksettings = get_link_ksettings,
1797	.set_link_ksettings = set_link_ksettings,
1798};
1799
1800static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1801{
1802	struct netdev_private *np = netdev_priv(dev);
1803	int rc;
1804
1805	if (!netif_running(dev))
1806		return -EINVAL;
1807
1808	spin_lock_irq(&np->lock);
1809	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1810	spin_unlock_irq(&np->lock);
1811
1812	return rc;
1813}
1814
1815static int netdev_close(struct net_device *dev)
1816{
1817	struct netdev_private *np = netdev_priv(dev);
1818	void __iomem *ioaddr = np->base;
1819	struct sk_buff *skb;
1820	int i;
1821
1822	/* Wait for and kill the Rx/Tx tasklets */
1823	tasklet_kill(&np->rx_tasklet);
1824	tasklet_kill(&np->tx_tasklet);
1825	np->cur_tx = 0;
1826	np->dirty_tx = 0;
1827	np->cur_task = 0;
1828	np->last_tx = NULL;
1829
1830	netif_stop_queue(dev);
1831
1832	if (netif_msg_ifdown(np)) {
1833		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1834			   "Rx %4.4x Int %2.2x.\n",
1835			   dev->name, ioread8(ioaddr + TxStatus),
1836			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1837		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1838			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1839	}
1840
1841	/* Disable interrupts by clearing the interrupt mask. */
1842	iowrite16(0x0000, ioaddr + IntrEnable);
1843
1844	/* Disable Rx and Tx DMA so resources can be released safely */
1845	iowrite32(0x500, ioaddr + DMACtrl);
1846
1847	/* Stop the chip's Tx and Rx processes. */
1848	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1849
1850	for (i = 2000; i > 0; i--) {
1851		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1852			break;
1853		mdelay(1);
1854	}
1855
1856	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1857			ioaddr + ASIC_HI_WORD(ASICCtrl));
1858
1859	for (i = 2000; i > 0; i--) {
1860		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1861			break;
1862		mdelay(1);
1863	}
1864
1865#ifdef __i386__
1866	if (netif_msg_hw(np)) {
1867		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1868			   (int)(np->tx_ring_dma));
1869		for (i = 0; i < TX_RING_SIZE; i++)
1870			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1871				   i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
1872				   np->tx_ring[i].frag.length);
1873		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1874			   (int)(np->rx_ring_dma));
1875		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1876			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1877				   i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
1878				   np->rx_ring[i].frag.length);
1879		}
1880	}
1881#endif /* __i386__ debugging only */
1882
1883	free_irq(np->pci_dev->irq, dev);
1884
1885	del_timer_sync(&np->timer);
1886
1887	/* Free all the skbuffs in the Rx queue. */
1888	for (i = 0; i < RX_RING_SIZE; i++) {
1889		np->rx_ring[i].status = 0;
1890		skb = np->rx_skbuff[i];
1891		if (skb) {
1892			dma_unmap_single(&np->pci_dev->dev,
1893				le32_to_cpu(np->rx_ring[i].frag.addr),
1894				np->rx_buf_sz, DMA_FROM_DEVICE);
1895			dev_kfree_skb(skb);
1896			np->rx_skbuff[i] = NULL;
1897		}
1898		np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
1899	}
1900	for (i = 0; i < TX_RING_SIZE; i++) {
1901		np->tx_ring[i].next_desc = 0;
1902		skb = np->tx_skbuff[i];
1903		if (skb) {
1904			dma_unmap_single(&np->pci_dev->dev,
1905				le32_to_cpu(np->tx_ring[i].frag.addr),
1906				skb->len, DMA_TO_DEVICE);
1907			dev_kfree_skb(skb);
1908			np->tx_skbuff[i] = NULL;
1909		}
1910	}
1911
1912	return 0;
1913}
1914
1915static void sundance_remove1(struct pci_dev *pdev)
1916{
1917	struct net_device *dev = pci_get_drvdata(pdev);
1918
1919	if (dev) {
1920	    struct netdev_private *np = netdev_priv(dev);
1921	    unregister_netdev(dev);
1922	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1923		    np->rx_ring, np->rx_ring_dma);
1924	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1925		    np->tx_ring, np->tx_ring_dma);
1926	    pci_iounmap(pdev, np->base);
1927	    pci_release_regions(pdev);
1928	    free_netdev(dev);
1929	}
1930}
1931
1932static int __maybe_unused sundance_suspend(struct device *dev_d)
1933{
1934	struct net_device *dev = dev_get_drvdata(dev_d);
1935	struct netdev_private *np = netdev_priv(dev);
1936	void __iomem *ioaddr = np->base;
1937
1938	if (!netif_running(dev))
1939		return 0;
1940
1941	netdev_close(dev);
1942	netif_device_detach(dev);
1943
1944	if (np->wol_enabled) {
1945		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1946		iowrite16(RxEnable, ioaddr + MACCtrl1);
1947	}
1948
1949	device_set_wakeup_enable(dev_d, np->wol_enabled);
1950
1951	return 0;
1952}
1953
1954static int __maybe_unused sundance_resume(struct device *dev_d)
1955{
1956	struct net_device *dev = dev_get_drvdata(dev_d);
1957	int err = 0;
1958
1959	if (!netif_running(dev))
1960		return 0;
1961
1962	err = netdev_open(dev);
1963	if (err) {
1964		printk(KERN_ERR "%s: Can't resume interface!\n",
1965				dev->name);
1966		goto out;
1967	}
1968
1969	netif_device_attach(dev);
1970
1971out:
1972	return err;
1973}
1974
1975static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
1976
1977static struct pci_driver sundance_driver = {
1978	.name		= DRV_NAME,
1979	.id_table	= sundance_pci_tbl,
1980	.probe		= sundance_probe1,
1981	.remove		= sundance_remove1,
1982	.driver.pm	= &sundance_pm_ops,
1983};
1984
1985module_pci_driver(sundance_driver);
v4.6
   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	http://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26#define DRV_VERSION	"1.2"
  27#define DRV_RELDATE	"11-Sep-2006"
  28
  29
  30/* The user-configurable values.
  31   These may be modified when a driver module is loaded.*/
  32static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  34   Typical is a 64 element hash table based on the Ethernet CRC.  */
  35static const int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  38   Setting to > 1518 effectively disables this feature.
  39   This chip can receive into offset buffers, so the Alpha does not
  40   need a copy-align. */
  41static int rx_copybreak;
  42static int flowctrl=1;
  43
  44/* media[] specifies the media type the NIC operates at.
  45		 autosense	Autosensing active media.
  46		 10mbps_hd 	10Mbps half duplex.
  47		 10mbps_fd 	10Mbps full duplex.
  48		 100mbps_hd 	100Mbps half duplex.
  49		 100mbps_fd 	100Mbps full duplex.
  50		 0		Autosensing active media.
  51		 1	 	10Mbps half duplex.
  52		 2	 	10Mbps full duplex.
  53		 3	 	100Mbps half duplex.
  54		 4	 	100Mbps full duplex.
  55*/
  56#define MAX_UNITS 8
  57static char *media[MAX_UNITS];
  58
  59
  60/* Operational parameters that are set at compile time. */
  61
  62/* Keep the ring sizes a power of two for compile efficiency.
  63   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  64   Making the Tx ring too large decreases the effectiveness of channel
  65   bonding and packet priority, and more than 128 requires modifying the
  66   Tx error recovery.
  67   Large receive rings merely waste memory. */
  68#define TX_RING_SIZE	32
  69#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  70#define RX_RING_SIZE	64
  71#define RX_BUDGET	32
  72#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
  73#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
  74
  75/* Operational parameters that usually are not changed. */
  76/* Time in jiffies before concluding the transmitter is hung. */
  77#define TX_TIMEOUT  (4*HZ)
  78#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  79
  80/* Include files, designed to support most kernel versions 2.0.0 and later. */
  81#include <linux/module.h>
  82#include <linux/kernel.h>
  83#include <linux/string.h>
  84#include <linux/timer.h>
  85#include <linux/errno.h>
  86#include <linux/ioport.h>
  87#include <linux/interrupt.h>
  88#include <linux/pci.h>
  89#include <linux/netdevice.h>
  90#include <linux/etherdevice.h>
  91#include <linux/skbuff.h>
  92#include <linux/init.h>
  93#include <linux/bitops.h>
  94#include <asm/uaccess.h>
  95#include <asm/processor.h>		/* Processor type for cache alignment. */
  96#include <asm/io.h>
  97#include <linux/delay.h>
  98#include <linux/spinlock.h>
  99#include <linux/dma-mapping.h>
 100#include <linux/crc32.h>
 101#include <linux/ethtool.h>
 102#include <linux/mii.h>
 103
 104/* These identify the driver base version and may not be removed. */
 105static const char version[] =
 106	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
 107	" Written by Donald Becker\n";
 108
 109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 111MODULE_LICENSE("GPL");
 112
 113module_param(debug, int, 0);
 114module_param(rx_copybreak, int, 0);
 115module_param_array(media, charp, NULL, 0);
 116module_param(flowctrl, int, 0);
 117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 120
 121/*
 122				Theory of Operation
 123
 124I. Board Compatibility
 125
 126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 127
 128II. Board-specific settings
 129
 130III. Driver operation
 131
 132IIIa. Ring buffers
 133
 134This driver uses two statically allocated fixed-size descriptor lists
 135formed into rings by a branch from the final descriptor to the beginning of
 136the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 137Some chips explicitly use only 2^N sized rings, while others use a
 138'next descriptor' pointer that the driver forms into rings.
 139
 140IIIb/c. Transmit/Receive Structure
 141
 142This driver uses a zero-copy receive and transmit scheme.
 143The driver allocates full frame size skbuffs for the Rx ring buffers at
 144open() time and passes the skb->data field to the chip as receive data
 145buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 146a fresh skbuff is allocated and the frame is copied to the new skbuff.
 147When the incoming frame is larger, the skbuff is passed directly up the
 148protocol stack.  Buffers consumed this way are replaced by newly allocated
 149skbuffs in a later phase of receives.
 150
 151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 152using a full-sized skbuff for small frames vs. the copying costs of larger
 153frames.  New boards are typically used in generously configured machines
 154and the underfilled buffers have negligible impact compared to the benefit of
 155a single allocation size, so the default value of zero results in never
 156copying packets.  When copying is done, the cost is usually mitigated by using
 157a combined copy/checksum routine.  Copying also preloads the cache, which is
 158most useful with small frames.
 159
 160A subtle aspect of the operation is that the IP header at offset 14 in an
 161ethernet frame isn't longword aligned for further processing.
 162Unaligned buffers are permitted by the Sundance hardware, so
 163frames are received into the skbuff at an offset of "+2", 16-byte aligning
 164the IP header.
 165
 166IIId. Synchronization
 167
 168The driver runs as two independent, single-threaded flows of control.  One
 169is the send-packet routine, which enforces single-threaded use by the
 170dev->tbusy flag.  The other thread is the interrupt handler, which is single
 171threaded by the hardware and interrupt handling software.
 172
 173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 174flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 176the 'lp->tx_full' flag.
 177
 178The interrupt handler has exclusive control over the Rx ring and records stats
 179from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 181clears both the tx_full and tbusy flags.
 182
 183IV. Notes
 184
 185IVb. References
 186
 187The Sundance ST201 datasheet, preliminary version.
 188The Kendin KS8723 datasheet, preliminary version.
 189The ICplus IP100 datasheet, preliminary version.
 190http://www.scyld.com/expert/100mbps.html
 191http://www.scyld.com/expert/NWay.html
 192
 193IVc. Errata
 194
 195*/
 196
 197/* Work-around for Kendin chip bugs. */
 198#ifndef CONFIG_SUNDANCE_MMIO
 199#define USE_IO_OPS 1
 200#endif
 201
 202static const struct pci_device_id sundance_pci_tbl[] = {
 203	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 204	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 205	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 206	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 207	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 208	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 209	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 210	{ }
 211};
 212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 213
 214enum {
 215	netdev_io_size = 128
 216};
 217
 218struct pci_id_info {
 219        const char *name;
 220};
 221static const struct pci_id_info pci_id_tbl[] = {
 222	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 223	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 224	{"D-Link DFE-580TX 4 port Server Adapter"},
 225	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 226	{"D-Link DL10050-based FAST Ethernet Adapter"},
 227	{"Sundance Technology Alta"},
 228	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229	{ }	/* terminate list. */
 230};
 231
 232/* This driver was written to use PCI memory space, however x86-oriented
 233   hardware often uses I/O space accesses. */
 234
 235/* Offsets to the device registers.
 236   Unlike software-only systems, device drivers interact with complex hardware.
 237   It's not useful to define symbolic names for every register bit in the
 238   device.  The name can only partially document the semantics and make
 239   the driver longer and more difficult to read.
 240   In general, only the important configuration values or bits changed
 241   multiple times should be defined symbolically.
 242*/
 243enum alta_offsets {
 244	DMACtrl = 0x00,
 245	TxListPtr = 0x04,
 246	TxDMABurstThresh = 0x08,
 247	TxDMAUrgentThresh = 0x09,
 248	TxDMAPollPeriod = 0x0a,
 249	RxDMAStatus = 0x0c,
 250	RxListPtr = 0x10,
 251	DebugCtrl0 = 0x1a,
 252	DebugCtrl1 = 0x1c,
 253	RxDMABurstThresh = 0x14,
 254	RxDMAUrgentThresh = 0x15,
 255	RxDMAPollPeriod = 0x16,
 256	LEDCtrl = 0x1a,
 257	ASICCtrl = 0x30,
 258	EEData = 0x34,
 259	EECtrl = 0x36,
 260	FlashAddr = 0x40,
 261	FlashData = 0x44,
 262	WakeEvent = 0x45,
 263	TxStatus = 0x46,
 264	TxFrameId = 0x47,
 265	DownCounter = 0x18,
 266	IntrClear = 0x4a,
 267	IntrEnable = 0x4c,
 268	IntrStatus = 0x4e,
 269	MACCtrl0 = 0x50,
 270	MACCtrl1 = 0x52,
 271	StationAddr = 0x54,
 272	MaxFrameSize = 0x5A,
 273	RxMode = 0x5c,
 274	MIICtrl = 0x5e,
 275	MulticastFilter0 = 0x60,
 276	MulticastFilter1 = 0x64,
 277	RxOctetsLow = 0x68,
 278	RxOctetsHigh = 0x6a,
 279	TxOctetsLow = 0x6c,
 280	TxOctetsHigh = 0x6e,
 281	TxFramesOK = 0x70,
 282	RxFramesOK = 0x72,
 283	StatsCarrierError = 0x74,
 284	StatsLateColl = 0x75,
 285	StatsMultiColl = 0x76,
 286	StatsOneColl = 0x77,
 287	StatsTxDefer = 0x78,
 288	RxMissed = 0x79,
 289	StatsTxXSDefer = 0x7a,
 290	StatsTxAbort = 0x7b,
 291	StatsBcastTx = 0x7c,
 292	StatsBcastRx = 0x7d,
 293	StatsMcastTx = 0x7e,
 294	StatsMcastRx = 0x7f,
 295	/* Aliased and bogus values! */
 296	RxStatus = 0x0c,
 297};
 298
 299#define ASIC_HI_WORD(x)	((x) + 2)
 300
 301enum ASICCtrl_HiWord_bit {
 302	GlobalReset = 0x0001,
 303	RxReset = 0x0002,
 304	TxReset = 0x0004,
 305	DMAReset = 0x0008,
 306	FIFOReset = 0x0010,
 307	NetworkReset = 0x0020,
 308	HostReset = 0x0040,
 309	ResetBusy = 0x0400,
 310};
 311
 312/* Bits in the interrupt status/mask registers. */
 313enum intr_status_bits {
 314	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 315	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 316	IntrDrvRqst=0x0040,
 317	StatsMax=0x0080, LinkChange=0x0100,
 318	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 319};
 320
 321/* Bits in the RxMode register. */
 322enum rx_mode_bits {
 323	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 324	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 325};
 326/* Bits in MACCtrl. */
 327enum mac_ctrl0_bits {
 328	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 329	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 330};
 331enum mac_ctrl1_bits {
 332	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 333	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 334	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 335};
 336
 337/* Bits in WakeEvent register. */
 338enum wake_event_bits {
 339	WakePktEnable = 0x01,
 340	MagicPktEnable = 0x02,
 341	LinkEventEnable = 0x04,
 342	WolEnable = 0x80,
 343};
 344
 345/* The Rx and Tx buffer descriptors. */
 346/* Note that using only 32 bit fields simplifies conversion to big-endian
 347   architectures. */
 348struct netdev_desc {
 349	__le32 next_desc;
 350	__le32 status;
 351	struct desc_frag { __le32 addr, length; } frag[1];
 352};
 353
 354/* Bits in netdev_desc.status */
 355enum desc_status_bits {
 356	DescOwn=0x8000,
 357	DescEndPacket=0x4000,
 358	DescEndRing=0x2000,
 359	LastFrag=0x80000000,
 360	DescIntrOnTx=0x8000,
 361	DescIntrOnDMADone=0x80000000,
 362	DisableAlign = 0x00000001,
 363};
 364
 365#define PRIV_ALIGN	15 	/* Required alignment mask */
 366/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 367   within the structure. */
 368#define MII_CNT		4
 369struct netdev_private {
 370	/* Descriptor rings first for alignment. */
 371	struct netdev_desc *rx_ring;
 372	struct netdev_desc *tx_ring;
 373	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 374	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 375        dma_addr_t tx_ring_dma;
 376        dma_addr_t rx_ring_dma;
 377	struct timer_list timer;		/* Media monitoring timer. */
 
 378	/* ethtool extra stats */
 379	struct {
 380		u64 tx_multiple_collisions;
 381		u64 tx_single_collisions;
 382		u64 tx_late_collisions;
 383		u64 tx_deferred;
 384		u64 tx_deferred_excessive;
 385		u64 tx_aborted;
 386		u64 tx_bcasts;
 387		u64 rx_bcasts;
 388		u64 tx_mcasts;
 389		u64 rx_mcasts;
 390	} xstats;
 391	/* Frequently used values: keep some adjacent for cache effect. */
 392	spinlock_t lock;
 393	int msg_enable;
 394	int chip_id;
 395	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 396	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 397	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 398	unsigned int cur_tx, dirty_tx;
 399	/* These values are keep track of the transceiver/media in use. */
 400	unsigned int flowctrl:1;
 401	unsigned int default_port:4;		/* Last dev->if_port value. */
 402	unsigned int an_enable:1;
 403	unsigned int speed;
 404	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
 405	struct tasklet_struct rx_tasklet;
 406	struct tasklet_struct tx_tasklet;
 407	int budget;
 408	int cur_task;
 409	/* Multicast and receive mode. */
 410	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 411	u16 mcast_filter[4];
 412	/* MII transceiver section. */
 413	struct mii_if_info mii_if;
 414	int mii_preamble_required;
 415	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 416	struct pci_dev *pci_dev;
 417	void __iomem *base;
 418	spinlock_t statlock;
 419};
 420
 421/* The station address location in the EEPROM. */
 422#define EEPROM_SA_OFFSET	0x10
 423#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 424			IntrDrvRqst | IntrTxDone | StatsMax | \
 425			LinkChange)
 426
 427static int  change_mtu(struct net_device *dev, int new_mtu);
 428static int  eeprom_read(void __iomem *ioaddr, int location);
 429static int  mdio_read(struct net_device *dev, int phy_id, int location);
 430static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 431static int  mdio_wait_link(struct net_device *dev, int wait);
 432static int  netdev_open(struct net_device *dev);
 433static void check_duplex(struct net_device *dev);
 434static void netdev_timer(unsigned long data);
 435static void tx_timeout(struct net_device *dev);
 436static void init_ring(struct net_device *dev);
 437static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 438static int reset_tx (struct net_device *dev);
 439static irqreturn_t intr_handler(int irq, void *dev_instance);
 440static void rx_poll(unsigned long data);
 441static void tx_poll(unsigned long data);
 442static void refill_rx (struct net_device *dev);
 443static void netdev_error(struct net_device *dev, int intr_status);
 444static void netdev_error(struct net_device *dev, int intr_status);
 445static void set_rx_mode(struct net_device *dev);
 446static int __set_mac_addr(struct net_device *dev);
 447static int sundance_set_mac_addr(struct net_device *dev, void *data);
 448static struct net_device_stats *get_stats(struct net_device *dev);
 449static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 450static int  netdev_close(struct net_device *dev);
 451static const struct ethtool_ops ethtool_ops;
 452
 453static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 454{
 455	struct netdev_private *np = netdev_priv(dev);
 456	void __iomem *ioaddr = np->base + ASICCtrl;
 457	int countdown;
 458
 459	/* ST201 documentation states ASICCtrl is a 32bit register */
 460	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 461	/* ST201 documentation states reset can take up to 1 ms */
 462	countdown = 10 + 1;
 463	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 464		if (--countdown == 0) {
 465			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 466			break;
 467		}
 468		udelay(100);
 469	}
 470}
 471
 472#ifdef CONFIG_NET_POLL_CONTROLLER
 473static void sundance_poll_controller(struct net_device *dev)
 474{
 475	struct netdev_private *np = netdev_priv(dev);
 476
 477	disable_irq(np->pci_dev->irq);
 478	intr_handler(np->pci_dev->irq, dev);
 479	enable_irq(np->pci_dev->irq);
 480}
 481#endif
 482
 483static const struct net_device_ops netdev_ops = {
 484	.ndo_open		= netdev_open,
 485	.ndo_stop		= netdev_close,
 486	.ndo_start_xmit		= start_tx,
 487	.ndo_get_stats 		= get_stats,
 488	.ndo_set_rx_mode	= set_rx_mode,
 489	.ndo_do_ioctl 		= netdev_ioctl,
 490	.ndo_tx_timeout		= tx_timeout,
 491	.ndo_change_mtu		= change_mtu,
 492	.ndo_set_mac_address 	= sundance_set_mac_addr,
 493	.ndo_validate_addr	= eth_validate_addr,
 494#ifdef CONFIG_NET_POLL_CONTROLLER
 495	.ndo_poll_controller 	= sundance_poll_controller,
 496#endif
 497};
 498
 499static int sundance_probe1(struct pci_dev *pdev,
 500			   const struct pci_device_id *ent)
 501{
 502	struct net_device *dev;
 503	struct netdev_private *np;
 504	static int card_idx;
 505	int chip_idx = ent->driver_data;
 506	int irq;
 507	int i;
 508	void __iomem *ioaddr;
 509	u16 mii_ctl;
 510	void *ring_space;
 511	dma_addr_t ring_dma;
 512#ifdef USE_IO_OPS
 513	int bar = 0;
 514#else
 515	int bar = 1;
 516#endif
 517	int phy, phy_end, phy_idx = 0;
 518
 519/* when built into the kernel, we only print version if device is found */
 520#ifndef MODULE
 521	static int printed_version;
 522	if (!printed_version++)
 523		printk(version);
 524#endif
 525
 526	if (pci_enable_device(pdev))
 527		return -EIO;
 528	pci_set_master(pdev);
 529
 530	irq = pdev->irq;
 531
 532	dev = alloc_etherdev(sizeof(*np));
 533	if (!dev)
 534		return -ENOMEM;
 535	SET_NETDEV_DEV(dev, &pdev->dev);
 536
 537	if (pci_request_regions(pdev, DRV_NAME))
 538		goto err_out_netdev;
 539
 540	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 541	if (!ioaddr)
 542		goto err_out_res;
 543
 544	for (i = 0; i < 3; i++)
 545		((__le16 *)dev->dev_addr)[i] =
 546			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 
 547
 548	np = netdev_priv(dev);
 
 549	np->base = ioaddr;
 550	np->pci_dev = pdev;
 551	np->chip_id = chip_idx;
 552	np->msg_enable = (1 << debug) - 1;
 553	spin_lock_init(&np->lock);
 554	spin_lock_init(&np->statlock);
 555	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 556	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 557
 558	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 559			&ring_dma, GFP_KERNEL);
 560	if (!ring_space)
 561		goto err_out_cleardev;
 562	np->tx_ring = (struct netdev_desc *)ring_space;
 563	np->tx_ring_dma = ring_dma;
 564
 565	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 566			&ring_dma, GFP_KERNEL);
 567	if (!ring_space)
 568		goto err_out_unmap_tx;
 569	np->rx_ring = (struct netdev_desc *)ring_space;
 570	np->rx_ring_dma = ring_dma;
 571
 572	np->mii_if.dev = dev;
 573	np->mii_if.mdio_read = mdio_read;
 574	np->mii_if.mdio_write = mdio_write;
 575	np->mii_if.phy_id_mask = 0x1f;
 576	np->mii_if.reg_num_mask = 0x1f;
 577
 578	/* The chip-specific entries in the device structure. */
 579	dev->netdev_ops = &netdev_ops;
 580	dev->ethtool_ops = &ethtool_ops;
 581	dev->watchdog_timeo = TX_TIMEOUT;
 582
 
 
 
 
 583	pci_set_drvdata(pdev, dev);
 584
 585	i = register_netdev(dev);
 586	if (i)
 587		goto err_out_unmap_rx;
 588
 589	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 590	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 591	       dev->dev_addr, irq);
 592
 593	np->phys[0] = 1;		/* Default setting */
 594	np->mii_preamble_required++;
 595
 596	/*
 597	 * It seems some phys doesn't deal well with address 0 being accessed
 598	 * first
 599	 */
 600	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 601		phy = 0;
 602		phy_end = 31;
 603	} else {
 604		phy = 1;
 605		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 606	}
 607	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 608		int phyx = phy & 0x1f;
 609		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 610		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 611			np->phys[phy_idx++] = phyx;
 612			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 613			if ((mii_status & 0x0040) == 0)
 614				np->mii_preamble_required++;
 615			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 616				   "0x%4.4x advertising %4.4x.\n",
 617				   dev->name, phyx, mii_status, np->mii_if.advertising);
 618		}
 619	}
 620	np->mii_preamble_required--;
 621
 622	if (phy_idx == 0) {
 623		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 624			   dev->name, ioread32(ioaddr + ASICCtrl));
 625		goto err_out_unregister;
 626	}
 627
 628	np->mii_if.phy_id = np->phys[0];
 629
 630	/* Parse override configuration */
 631	np->an_enable = 1;
 632	if (card_idx < MAX_UNITS) {
 633		if (media[card_idx] != NULL) {
 634			np->an_enable = 0;
 635			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 636			    strcmp (media[card_idx], "4") == 0) {
 637				np->speed = 100;
 638				np->mii_if.full_duplex = 1;
 639			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 640				   strcmp (media[card_idx], "3") == 0) {
 641				np->speed = 100;
 642				np->mii_if.full_duplex = 0;
 643			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 644				   strcmp (media[card_idx], "2") == 0) {
 645				np->speed = 10;
 646				np->mii_if.full_duplex = 1;
 647			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 648				   strcmp (media[card_idx], "1") == 0) {
 649				np->speed = 10;
 650				np->mii_if.full_duplex = 0;
 651			} else {
 652				np->an_enable = 1;
 653			}
 654		}
 655		if (flowctrl == 1)
 656			np->flowctrl = 1;
 657	}
 658
 659	/* Fibre PHY? */
 660	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 661		/* Default 100Mbps Full */
 662		if (np->an_enable) {
 663			np->speed = 100;
 664			np->mii_if.full_duplex = 1;
 665			np->an_enable = 0;
 666		}
 667	}
 668	/* Reset PHY */
 669	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 670	mdelay (300);
 671	/* If flow control enabled, we need to advertise it.*/
 672	if (np->flowctrl)
 673		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 674	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 675	/* Force media type */
 676	if (!np->an_enable) {
 677		mii_ctl = 0;
 678		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 679		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 680		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 681		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 682			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 683
 684	}
 685
 686	/* Perhaps move the reset here? */
 687	/* Reset the chip to erase previous misconfiguration. */
 688	if (netif_msg_hw(np))
 689		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 690	sundance_reset(dev, 0x00ff << 16);
 691	if (netif_msg_hw(np))
 692		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 693
 694	card_idx++;
 695	return 0;
 696
 697err_out_unregister:
 698	unregister_netdev(dev);
 699err_out_unmap_rx:
 700	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 701		np->rx_ring, np->rx_ring_dma);
 702err_out_unmap_tx:
 703	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 704		np->tx_ring, np->tx_ring_dma);
 705err_out_cleardev:
 706	pci_iounmap(pdev, ioaddr);
 707err_out_res:
 708	pci_release_regions(pdev);
 709err_out_netdev:
 710	free_netdev (dev);
 711	return -ENODEV;
 712}
 713
 714static int change_mtu(struct net_device *dev, int new_mtu)
 715{
 716	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
 717		return -EINVAL;
 718	if (netif_running(dev))
 719		return -EBUSY;
 720	dev->mtu = new_mtu;
 721	return 0;
 722}
 723
 724#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 725/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 726static int eeprom_read(void __iomem *ioaddr, int location)
 727{
 728	int boguscnt = 10000;		/* Typical 1900 ticks. */
 729	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 730	do {
 731		eeprom_delay(ioaddr + EECtrl);
 732		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 733			return ioread16(ioaddr + EEData);
 734		}
 735	} while (--boguscnt > 0);
 736	return 0;
 737}
 738
 739/*  MII transceiver control section.
 740	Read and write the MII registers using software-generated serial
 741	MDIO protocol.  See the MII specifications or DP83840A data sheet
 742	for details.
 743
 744	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
 745	met by back-to-back 33Mhz PCI cycles. */
 746#define mdio_delay() ioread8(mdio_addr)
 747
 748enum mii_reg_bits {
 749	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 750};
 751#define MDIO_EnbIn  (0)
 752#define MDIO_WRITE0 (MDIO_EnbOutput)
 753#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 754
 755/* Generate the preamble required for initial synchronization and
 756   a few older transceivers. */
 757static void mdio_sync(void __iomem *mdio_addr)
 758{
 759	int bits = 32;
 760
 761	/* Establish sync by sending at least 32 logic ones. */
 762	while (--bits >= 0) {
 763		iowrite8(MDIO_WRITE1, mdio_addr);
 764		mdio_delay();
 765		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 766		mdio_delay();
 767	}
 768}
 769
 770static int mdio_read(struct net_device *dev, int phy_id, int location)
 771{
 772	struct netdev_private *np = netdev_priv(dev);
 773	void __iomem *mdio_addr = np->base + MIICtrl;
 774	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 775	int i, retval = 0;
 776
 777	if (np->mii_preamble_required)
 778		mdio_sync(mdio_addr);
 779
 780	/* Shift the read command bits out. */
 781	for (i = 15; i >= 0; i--) {
 782		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 783
 784		iowrite8(dataval, mdio_addr);
 785		mdio_delay();
 786		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 787		mdio_delay();
 788	}
 789	/* Read the two transition, 16 data, and wire-idle bits. */
 790	for (i = 19; i > 0; i--) {
 791		iowrite8(MDIO_EnbIn, mdio_addr);
 792		mdio_delay();
 793		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 794		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 795		mdio_delay();
 796	}
 797	return (retval>>1) & 0xffff;
 798}
 799
 800static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 801{
 802	struct netdev_private *np = netdev_priv(dev);
 803	void __iomem *mdio_addr = np->base + MIICtrl;
 804	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 805	int i;
 806
 807	if (np->mii_preamble_required)
 808		mdio_sync(mdio_addr);
 809
 810	/* Shift the command bits out. */
 811	for (i = 31; i >= 0; i--) {
 812		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 813
 814		iowrite8(dataval, mdio_addr);
 815		mdio_delay();
 816		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 817		mdio_delay();
 818	}
 819	/* Clear out extra bits. */
 820	for (i = 2; i > 0; i--) {
 821		iowrite8(MDIO_EnbIn, mdio_addr);
 822		mdio_delay();
 823		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 824		mdio_delay();
 825	}
 826}
 827
 828static int mdio_wait_link(struct net_device *dev, int wait)
 829{
 830	int bmsr;
 831	int phy_id;
 832	struct netdev_private *np;
 833
 834	np = netdev_priv(dev);
 835	phy_id = np->phys[0];
 836
 837	do {
 838		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 839		if (bmsr & 0x0004)
 840			return 0;
 841		mdelay(1);
 842	} while (--wait > 0);
 843	return -1;
 844}
 845
 846static int netdev_open(struct net_device *dev)
 847{
 848	struct netdev_private *np = netdev_priv(dev);
 849	void __iomem *ioaddr = np->base;
 850	const int irq = np->pci_dev->irq;
 851	unsigned long flags;
 852	int i;
 853
 854	sundance_reset(dev, 0x00ff << 16);
 855
 856	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 857	if (i)
 858		return i;
 859
 860	if (netif_msg_ifup(np))
 861		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 862
 863	init_ring(dev);
 864
 865	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 866	/* The Tx list pointer is written as packets are queued. */
 867
 868	/* Initialize other registers. */
 869	__set_mac_addr(dev);
 870#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 871	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 872#else
 873	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 874#endif
 875	if (dev->mtu > 2047)
 876		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 877
 878	/* Configure the PCI bus bursts and FIFO thresholds. */
 879
 880	if (dev->if_port == 0)
 881		dev->if_port = np->default_port;
 882
 883	spin_lock_init(&np->mcastlock);
 884
 885	set_rx_mode(dev);
 886	iowrite16(0, ioaddr + IntrEnable);
 887	iowrite16(0, ioaddr + DownCounter);
 888	/* Set the chip to poll every N*320nsec. */
 889	iowrite8(100, ioaddr + RxDMAPollPeriod);
 890	iowrite8(127, ioaddr + TxDMAPollPeriod);
 891	/* Fix DFE-580TX packet drop issue */
 892	if (np->pci_dev->revision >= 0x14)
 893		iowrite8(0x01, ioaddr + DebugCtrl1);
 894	netif_start_queue(dev);
 895
 896	spin_lock_irqsave(&np->lock, flags);
 897	reset_tx(dev);
 898	spin_unlock_irqrestore(&np->lock, flags);
 899
 900	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 901
 902	/* Disable Wol */
 903	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 904	np->wol_enabled = 0;
 905
 906	if (netif_msg_ifup(np))
 907		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 908			   "MAC Control %x, %4.4x %4.4x.\n",
 909			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 910			   ioread32(ioaddr + MACCtrl0),
 911			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 912
 913	/* Set the timer to check for link beat. */
 914	init_timer(&np->timer);
 915	np->timer.expires = jiffies + 3*HZ;
 916	np->timer.data = (unsigned long)dev;
 917	np->timer.function = netdev_timer;				/* timer handler */
 918	add_timer(&np->timer);
 919
 920	/* Enable interrupts by setting the interrupt mask. */
 921	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 922
 923	return 0;
 924}
 925
 926static void check_duplex(struct net_device *dev)
 927{
 928	struct netdev_private *np = netdev_priv(dev);
 929	void __iomem *ioaddr = np->base;
 930	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 931	int negotiated = mii_lpa & np->mii_if.advertising;
 932	int duplex;
 933
 934	/* Force media */
 935	if (!np->an_enable || mii_lpa == 0xffff) {
 936		if (np->mii_if.full_duplex)
 937			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 938				ioaddr + MACCtrl0);
 939		return;
 940	}
 941
 942	/* Autonegotiation */
 943	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 944	if (np->mii_if.full_duplex != duplex) {
 945		np->mii_if.full_duplex = duplex;
 946		if (netif_msg_link(np))
 947			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 948				   "negotiated capability %4.4x.\n", dev->name,
 949				   duplex ? "full" : "half", np->phys[0], negotiated);
 950		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 951	}
 952}
 953
 954static void netdev_timer(unsigned long data)
 955{
 956	struct net_device *dev = (struct net_device *)data;
 957	struct netdev_private *np = netdev_priv(dev);
 958	void __iomem *ioaddr = np->base;
 959	int next_tick = 10*HZ;
 960
 961	if (netif_msg_timer(np)) {
 962		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 963			   "Tx %x Rx %x.\n",
 964			   dev->name, ioread16(ioaddr + IntrEnable),
 965			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 966	}
 967	check_duplex(dev);
 968	np->timer.expires = jiffies + next_tick;
 969	add_timer(&np->timer);
 970}
 971
 972static void tx_timeout(struct net_device *dev)
 973{
 974	struct netdev_private *np = netdev_priv(dev);
 975	void __iomem *ioaddr = np->base;
 976	unsigned long flag;
 977
 978	netif_stop_queue(dev);
 979	tasklet_disable(&np->tx_tasklet);
 980	iowrite16(0, ioaddr + IntrEnable);
 981	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 982		   "TxFrameId %2.2x,"
 983		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 984		   ioread8(ioaddr + TxFrameId));
 985
 986	{
 987		int i;
 988		for (i=0; i<TX_RING_SIZE; i++) {
 989			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 990				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 991				le32_to_cpu(np->tx_ring[i].next_desc),
 992				le32_to_cpu(np->tx_ring[i].status),
 993				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 994				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 995				le32_to_cpu(np->tx_ring[i].frag[0].length));
 996		}
 997		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 998			ioread32(np->base + TxListPtr),
 999			netif_queue_stopped(dev));
1000		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1001			np->cur_tx, np->cur_tx % TX_RING_SIZE,
1002			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1003		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1004		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1005	}
1006	spin_lock_irqsave(&np->lock, flag);
1007
1008	/* Stop and restart the chip's Tx processes . */
1009	reset_tx(dev);
1010	spin_unlock_irqrestore(&np->lock, flag);
1011
1012	dev->if_port = 0;
1013
1014	dev->trans_start = jiffies; /* prevent tx timeout */
1015	dev->stats.tx_errors++;
1016	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1017		netif_wake_queue(dev);
1018	}
1019	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1020	tasklet_enable(&np->tx_tasklet);
1021}
1022
1023
1024/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1025static void init_ring(struct net_device *dev)
1026{
1027	struct netdev_private *np = netdev_priv(dev);
1028	int i;
1029
1030	np->cur_rx = np->cur_tx = 0;
1031	np->dirty_rx = np->dirty_tx = 0;
1032	np->cur_task = 0;
1033
1034	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1035
1036	/* Initialize all Rx descriptors. */
1037	for (i = 0; i < RX_RING_SIZE; i++) {
1038		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1039			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1040		np->rx_ring[i].status = 0;
1041		np->rx_ring[i].frag[0].length = 0;
1042		np->rx_skbuff[i] = NULL;
1043	}
1044
1045	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1046	for (i = 0; i < RX_RING_SIZE; i++) {
1047		struct sk_buff *skb =
1048			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1049		np->rx_skbuff[i] = skb;
1050		if (skb == NULL)
1051			break;
1052		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1053		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1054			dma_map_single(&np->pci_dev->dev, skb->data,
1055				np->rx_buf_sz, DMA_FROM_DEVICE));
1056		if (dma_mapping_error(&np->pci_dev->dev,
1057					np->rx_ring[i].frag[0].addr)) {
1058			dev_kfree_skb(skb);
1059			np->rx_skbuff[i] = NULL;
1060			break;
1061		}
1062		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1063	}
1064	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1065
1066	for (i = 0; i < TX_RING_SIZE; i++) {
1067		np->tx_skbuff[i] = NULL;
1068		np->tx_ring[i].status = 0;
1069	}
1070}
1071
1072static void tx_poll (unsigned long data)
1073{
1074	struct net_device *dev = (struct net_device *)data;
1075	struct netdev_private *np = netdev_priv(dev);
1076	unsigned head = np->cur_task % TX_RING_SIZE;
1077	struct netdev_desc *txdesc =
1078		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1079
1080	/* Chain the next pointer */
1081	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1082		int entry = np->cur_task % TX_RING_SIZE;
1083		txdesc = &np->tx_ring[entry];
1084		if (np->last_tx) {
1085			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1086				entry*sizeof(struct netdev_desc));
1087		}
1088		np->last_tx = txdesc;
1089	}
1090	/* Indicate the latest descriptor of tx ring */
1091	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1092
1093	if (ioread32 (np->base + TxListPtr) == 0)
1094		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1095			np->base + TxListPtr);
1096}
1097
1098static netdev_tx_t
1099start_tx (struct sk_buff *skb, struct net_device *dev)
1100{
1101	struct netdev_private *np = netdev_priv(dev);
1102	struct netdev_desc *txdesc;
1103	unsigned entry;
1104
1105	/* Calculate the next Tx descriptor entry. */
1106	entry = np->cur_tx % TX_RING_SIZE;
1107	np->tx_skbuff[entry] = skb;
1108	txdesc = &np->tx_ring[entry];
1109
1110	txdesc->next_desc = 0;
1111	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1112	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1113				skb->data, skb->len, DMA_TO_DEVICE));
1114	if (dma_mapping_error(&np->pci_dev->dev,
1115				txdesc->frag[0].addr))
1116			goto drop_frame;
1117	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1118
1119	/* Increment cur_tx before tasklet_schedule() */
1120	np->cur_tx++;
1121	mb();
1122	/* Schedule a tx_poll() task */
1123	tasklet_schedule(&np->tx_tasklet);
1124
1125	/* On some architectures: explicitly flush cache lines here. */
1126	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1127	    !netif_queue_stopped(dev)) {
1128		/* do nothing */
1129	} else {
1130		netif_stop_queue (dev);
1131	}
1132	if (netif_msg_tx_queued(np)) {
1133		printk (KERN_DEBUG
1134			"%s: Transmit frame #%d queued in slot %d.\n",
1135			dev->name, np->cur_tx, entry);
1136	}
1137	return NETDEV_TX_OK;
1138
1139drop_frame:
1140	dev_kfree_skb_any(skb);
1141	np->tx_skbuff[entry] = NULL;
1142	dev->stats.tx_dropped++;
1143	return NETDEV_TX_OK;
1144}
1145
1146/* Reset hardware tx and free all of tx buffers */
1147static int
1148reset_tx (struct net_device *dev)
1149{
1150	struct netdev_private *np = netdev_priv(dev);
1151	void __iomem *ioaddr = np->base;
1152	struct sk_buff *skb;
1153	int i;
1154
1155	/* Reset tx logic, TxListPtr will be cleaned */
1156	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1157	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1158
1159	/* free all tx skbuff */
1160	for (i = 0; i < TX_RING_SIZE; i++) {
1161		np->tx_ring[i].next_desc = 0;
1162
1163		skb = np->tx_skbuff[i];
1164		if (skb) {
1165			dma_unmap_single(&np->pci_dev->dev,
1166				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1167				skb->len, DMA_TO_DEVICE);
1168			dev_kfree_skb_any(skb);
1169			np->tx_skbuff[i] = NULL;
1170			dev->stats.tx_dropped++;
1171		}
1172	}
1173	np->cur_tx = np->dirty_tx = 0;
1174	np->cur_task = 0;
1175
1176	np->last_tx = NULL;
1177	iowrite8(127, ioaddr + TxDMAPollPeriod);
1178
1179	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1180	return 0;
1181}
1182
1183/* The interrupt handler cleans up after the Tx thread,
1184   and schedule a Rx thread work */
1185static irqreturn_t intr_handler(int irq, void *dev_instance)
1186{
1187	struct net_device *dev = (struct net_device *)dev_instance;
1188	struct netdev_private *np = netdev_priv(dev);
1189	void __iomem *ioaddr = np->base;
1190	int hw_frame_id;
1191	int tx_cnt;
1192	int tx_status;
1193	int handled = 0;
1194	int i;
1195
1196
1197	do {
1198		int intr_status = ioread16(ioaddr + IntrStatus);
1199		iowrite16(intr_status, ioaddr + IntrStatus);
1200
1201		if (netif_msg_intr(np))
1202			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1203				   dev->name, intr_status);
1204
1205		if (!(intr_status & DEFAULT_INTR))
1206			break;
1207
1208		handled = 1;
1209
1210		if (intr_status & (IntrRxDMADone)) {
1211			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1212					ioaddr + IntrEnable);
1213			if (np->budget < 0)
1214				np->budget = RX_BUDGET;
1215			tasklet_schedule(&np->rx_tasklet);
1216		}
1217		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1218			tx_status = ioread16 (ioaddr + TxStatus);
1219			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1220				if (netif_msg_tx_done(np))
1221					printk
1222					    ("%s: Transmit status is %2.2x.\n",
1223				     	dev->name, tx_status);
1224				if (tx_status & 0x1e) {
1225					if (netif_msg_tx_err(np))
1226						printk("%s: Transmit error status %4.4x.\n",
1227							   dev->name, tx_status);
1228					dev->stats.tx_errors++;
1229					if (tx_status & 0x10)
1230						dev->stats.tx_fifo_errors++;
1231					if (tx_status & 0x08)
1232						dev->stats.collisions++;
1233					if (tx_status & 0x04)
1234						dev->stats.tx_fifo_errors++;
1235					if (tx_status & 0x02)
1236						dev->stats.tx_window_errors++;
1237
1238					/*
1239					** This reset has been verified on
1240					** DFE-580TX boards ! phdm@macqel.be.
1241					*/
1242					if (tx_status & 0x10) {	/* TxUnderrun */
1243						/* Restart Tx FIFO and transmitter */
1244						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1245						/* No need to reset the Tx pointer here */
1246					}
1247					/* Restart the Tx. Need to make sure tx enabled */
1248					i = 10;
1249					do {
1250						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1251						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1252							break;
1253						mdelay(1);
1254					} while (--i);
1255				}
1256				/* Yup, this is a documentation bug.  It cost me *hours*. */
1257				iowrite16 (0, ioaddr + TxStatus);
1258				if (tx_cnt < 0) {
1259					iowrite32(5000, ioaddr + DownCounter);
1260					break;
1261				}
1262				tx_status = ioread16 (ioaddr + TxStatus);
1263			}
1264			hw_frame_id = (tx_status >> 8) & 0xff;
1265		} else 	{
1266			hw_frame_id = ioread8(ioaddr + TxFrameId);
1267		}
1268
1269		if (np->pci_dev->revision >= 0x14) {
1270			spin_lock(&np->lock);
1271			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1272				int entry = np->dirty_tx % TX_RING_SIZE;
1273				struct sk_buff *skb;
1274				int sw_frame_id;
1275				sw_frame_id = (le32_to_cpu(
1276					np->tx_ring[entry].status) >> 2) & 0xff;
1277				if (sw_frame_id == hw_frame_id &&
1278					!(le32_to_cpu(np->tx_ring[entry].status)
1279					& 0x00010000))
1280						break;
1281				if (sw_frame_id == (hw_frame_id + 1) %
1282					TX_RING_SIZE)
1283						break;
1284				skb = np->tx_skbuff[entry];
1285				/* Free the original skb. */
1286				dma_unmap_single(&np->pci_dev->dev,
1287					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1288					skb->len, DMA_TO_DEVICE);
1289				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1290				np->tx_skbuff[entry] = NULL;
1291				np->tx_ring[entry].frag[0].addr = 0;
1292				np->tx_ring[entry].frag[0].length = 0;
1293			}
1294			spin_unlock(&np->lock);
1295		} else {
1296			spin_lock(&np->lock);
1297			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1298				int entry = np->dirty_tx % TX_RING_SIZE;
1299				struct sk_buff *skb;
1300				if (!(le32_to_cpu(np->tx_ring[entry].status)
1301							& 0x00010000))
1302					break;
1303				skb = np->tx_skbuff[entry];
1304				/* Free the original skb. */
1305				dma_unmap_single(&np->pci_dev->dev,
1306					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1307					skb->len, DMA_TO_DEVICE);
1308				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1309				np->tx_skbuff[entry] = NULL;
1310				np->tx_ring[entry].frag[0].addr = 0;
1311				np->tx_ring[entry].frag[0].length = 0;
1312			}
1313			spin_unlock(&np->lock);
1314		}
1315
1316		if (netif_queue_stopped(dev) &&
1317			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1318			/* The ring is no longer full, clear busy flag. */
1319			netif_wake_queue (dev);
1320		}
1321		/* Abnormal error summary/uncommon events handlers. */
1322		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1323			netdev_error(dev, intr_status);
1324	} while (0);
1325	if (netif_msg_intr(np))
1326		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1327			   dev->name, ioread16(ioaddr + IntrStatus));
1328	return IRQ_RETVAL(handled);
1329}
1330
1331static void rx_poll(unsigned long data)
1332{
1333	struct net_device *dev = (struct net_device *)data;
1334	struct netdev_private *np = netdev_priv(dev);
1335	int entry = np->cur_rx % RX_RING_SIZE;
1336	int boguscnt = np->budget;
1337	void __iomem *ioaddr = np->base;
1338	int received = 0;
1339
1340	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1341	while (1) {
1342		struct netdev_desc *desc = &(np->rx_ring[entry]);
1343		u32 frame_status = le32_to_cpu(desc->status);
1344		int pkt_len;
1345
1346		if (--boguscnt < 0) {
1347			goto not_done;
1348		}
1349		if (!(frame_status & DescOwn))
1350			break;
1351		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1352		if (netif_msg_rx_status(np))
1353			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1354				   frame_status);
1355		if (frame_status & 0x001f4000) {
1356			/* There was a error. */
1357			if (netif_msg_rx_err(np))
1358				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1359					   frame_status);
1360			dev->stats.rx_errors++;
1361			if (frame_status & 0x00100000)
1362				dev->stats.rx_length_errors++;
1363			if (frame_status & 0x00010000)
1364				dev->stats.rx_fifo_errors++;
1365			if (frame_status & 0x00060000)
1366				dev->stats.rx_frame_errors++;
1367			if (frame_status & 0x00080000)
1368				dev->stats.rx_crc_errors++;
1369			if (frame_status & 0x00100000) {
1370				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1371					   " status %8.8x.\n",
1372					   dev->name, frame_status);
1373			}
1374		} else {
1375			struct sk_buff *skb;
1376#ifndef final_version
1377			if (netif_msg_rx_status(np))
1378				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1379					   ", bogus_cnt %d.\n",
1380					   pkt_len, boguscnt);
1381#endif
1382			/* Check if the packet is long enough to accept without copying
1383			   to a minimally-sized skbuff. */
1384			if (pkt_len < rx_copybreak &&
1385			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1386				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1387				dma_sync_single_for_cpu(&np->pci_dev->dev,
1388						le32_to_cpu(desc->frag[0].addr),
1389						np->rx_buf_sz, DMA_FROM_DEVICE);
1390				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1391				dma_sync_single_for_device(&np->pci_dev->dev,
1392						le32_to_cpu(desc->frag[0].addr),
1393						np->rx_buf_sz, DMA_FROM_DEVICE);
1394				skb_put(skb, pkt_len);
1395			} else {
1396				dma_unmap_single(&np->pci_dev->dev,
1397					le32_to_cpu(desc->frag[0].addr),
1398					np->rx_buf_sz, DMA_FROM_DEVICE);
1399				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1400				np->rx_skbuff[entry] = NULL;
1401			}
1402			skb->protocol = eth_type_trans(skb, dev);
1403			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1404			netif_rx(skb);
1405		}
1406		entry = (entry + 1) % RX_RING_SIZE;
1407		received++;
1408	}
1409	np->cur_rx = entry;
1410	refill_rx (dev);
1411	np->budget -= received;
1412	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1413	return;
1414
1415not_done:
1416	np->cur_rx = entry;
1417	refill_rx (dev);
1418	if (!received)
1419		received = 1;
1420	np->budget -= received;
1421	if (np->budget <= 0)
1422		np->budget = RX_BUDGET;
1423	tasklet_schedule(&np->rx_tasklet);
1424}
1425
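/* Replenish the Rx ring: allocate and DMA-map a fresh skb for every slot that
   rx_poll() consumed, then hand the descriptor back to the chip by resetting
   its status word. */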
1426static void refill_rx (struct net_device *dev)
1427{
1428	struct netdev_private *np = netdev_priv(dev);
1429	int entry;
1430	int cnt = 0;
1431
1432	/* Refill the Rx ring buffers. */
1433	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1434		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1435		struct sk_buff *skb;
1436		entry = np->dirty_rx % RX_RING_SIZE;
1437		if (np->rx_skbuff[entry] == NULL) {
1438			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1439			np->rx_skbuff[entry] = skb;
1440			if (skb == NULL)
1441				break;		/* Better luck next round. */
1442			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1443			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1444				dma_map_single(&np->pci_dev->dev, skb->data,
1445					np->rx_buf_sz, DMA_FROM_DEVICE));
1446			if (dma_mapping_error(&np->pci_dev->dev,
1447				    np->rx_ring[entry].frag[0].addr)) {
1448			    dev_kfree_skb_irq(skb);
1449			    np->rx_skbuff[entry] = NULL;
1450			    break;
1451			}
1452		}
1453		/* Perhaps we need not reset this field. */
1454		np->rx_ring[entry].frag[0].length =
1455			cpu_to_le32(np->rx_buf_sz | LastFrag);
1456		np->rx_ring[entry].status = 0;
1457		cnt++;
1458	}
1459}
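
/* Handle the "abnormal" interrupt summary bits: LinkChange re-reads the PHY
   to report and record the new speed/duplex (re-enabling flow control if
   configured), StatsMax harvests the hardware statistics counters, and PCI
   bus errors are logged. */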
1460static void netdev_error(struct net_device *dev, int intr_status)
1461{
1462	struct netdev_private *np = netdev_priv(dev);
1463	void __iomem *ioaddr = np->base;
1464	u16 mii_ctl, mii_advertise, mii_lpa;
1465	int speed;
1466
1467	if (intr_status & LinkChange) {
1468		if (mdio_wait_link(dev, 10) == 0) {
1469			printk(KERN_INFO "%s: Link up\n", dev->name);
1470			if (np->an_enable) {
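				/* With autonegotiation, the resolved link mode is
				   the best ability advertised by both ends, i.e.
				   our advertisement ANDed with the link partner's
				   ability word. */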
1471				mii_advertise = mdio_read(dev, np->phys[0],
1472							   MII_ADVERTISE);
1473				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1474				mii_advertise &= mii_lpa;
1475				printk(KERN_INFO "%s: Link changed: ",
1476					dev->name);
1477				if (mii_advertise & ADVERTISE_100FULL) {
1478					np->speed = 100;
1479					printk("100Mbps, full duplex\n");
1480				} else if (mii_advertise & ADVERTISE_100HALF) {
1481					np->speed = 100;
1482					printk("100Mbps, half duplex\n");
1483				} else if (mii_advertise & ADVERTISE_10FULL) {
1484					np->speed = 10;
1485					printk("10Mbps, full duplex\n");
1486				} else if (mii_advertise & ADVERTISE_10HALF) {
1487					np->speed = 10;
1488					printk("10Mbps, half duplex\n");
1489				} else
1490					printk("\n");
1491
1492			} else {
1493				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1494				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1495				np->speed = speed;
1496				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1497					dev->name, speed);
1498				printk("%s duplex.\n",
1499					(mii_ctl & BMCR_FULLDPLX) ?
1500						"full" : "half");
1501			}
1502			check_duplex(dev);
1503			if (np->flowctrl && np->mii_if.full_duplex) {
1504				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1505					ioaddr + MulticastFilter1+2);
1506				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1507					ioaddr + MACCtrl0);
1508			}
1509			netif_carrier_on(dev);
1510		} else {
1511			printk(KERN_INFO "%s: Link down\n", dev->name);
1512			netif_carrier_off(dev);
1513		}
1514	}
1515	if (intr_status & StatsMax) {
1516		get_stats(dev);
1517	}
1518	if (intr_status & IntrPCIErr) {
1519		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1520			   dev->name, intr_status);
1521		/* We must do a global reset of DMA to continue. */
1522	}
1523}
1524
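/* Fold the chip's statistics registers into dev->stats and the extended
   xstats under statlock.  Called from netdev_error() on StatsMax interrupts
   and from the ethtool statistics path. */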
1525static struct net_device_stats *get_stats(struct net_device *dev)
1526{
1527	struct netdev_private *np = netdev_priv(dev);
1528	void __iomem *ioaddr = np->base;
1529	unsigned long flags;
1530	u8 late_coll, single_coll, mult_coll;
1531
1532	spin_lock_irqsave(&np->statlock, flags);
1533	/* The chip only needs to report frames that were silently dropped. */
1534	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1535	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1536	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1537	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1538
1539	mult_coll = ioread8(ioaddr + StatsMultiColl);
1540	np->xstats.tx_multiple_collisions += mult_coll;
1541	single_coll = ioread8(ioaddr + StatsOneColl);
1542	np->xstats.tx_single_collisions += single_coll;
1543	late_coll = ioread8(ioaddr + StatsLateColl);
1544	np->xstats.tx_late_collisions += late_coll;
1545	dev->stats.collisions += mult_coll
1546		+ single_coll
1547		+ late_coll;
1548
1549	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1550	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1551	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1552	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1553	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1554	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1555	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1556
1557	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1558	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1559	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1560	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1561
1562	spin_unlock_irqrestore(&np->statlock, flags);
1563
1564	return &dev->stats;
1565}
1566
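/* Program the receive address filter.  Promiscuous mode accepts every frame;
   too many multicast addresses (or IFF_ALLMULTI) falls back to accepting all
   multicast; otherwise a 64-bit hash filter is built from the top six bits of
   the little-endian CRC of each multicast address. */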
1567static void set_rx_mode(struct net_device *dev)
1568{
1569	struct netdev_private *np = netdev_priv(dev);
1570	void __iomem *ioaddr = np->base;
1571	u16 mc_filter[4];			/* Multicast hash filter */
1572	u32 rx_mode;
1573	int i;
1574
1575	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1576		memset(mc_filter, 0xff, sizeof(mc_filter));
1577		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1578	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1579		   (dev->flags & IFF_ALLMULTI)) {
1580		/* Too many to match, or accept all multicasts. */
1581		memset(mc_filter, 0xff, sizeof(mc_filter));
1582		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1583	} else if (!netdev_mc_empty(dev)) {
1584		struct netdev_hw_addr *ha;
1585		int bit;
1586		int index;
1587		int crc;
1588		memset (mc_filter, 0, sizeof (mc_filter));
1589		netdev_for_each_mc_addr(ha, dev) {
1590			crc = ether_crc_le(ETH_ALEN, ha->addr);
1591			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1592				if (crc & 0x80000000) index |= 1 << bit;
1593			mc_filter[index/16] |= (1 << (index % 16));
1594		}
1595		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1596	} else {
1597		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1598		return;
1599	}
1600	if (np->mii_if.full_duplex && np->flowctrl)
1601		mc_filter[3] |= 0x0200;
1602
1603	for (i = 0; i < 4; i++)
1604		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1605	iowrite8(rx_mode, ioaddr + RxMode);
1606}
1607
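/* Load dev->dev_addr into the StationAddr registers as three little-endian
   16-bit writes. */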
1608static int __set_mac_addr(struct net_device *dev)
1609{
1610	struct netdev_private *np = netdev_priv(dev);
1611	u16 addr16;
1612
1613	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1614	iowrite16(addr16, np->base + StationAddr);
1615	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1616	iowrite16(addr16, np->base + StationAddr+2);
1617	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1618	iowrite16(addr16, np->base + StationAddr+4);
1619	return 0;
1620}
1621
1622/* Invoked with rtnl_lock held */
1623static int sundance_set_mac_addr(struct net_device *dev, void *data)
1624{
1625	const struct sockaddr *addr = data;
1626
1627	if (!is_valid_ether_addr(addr->sa_data))
1628		return -EADDRNOTAVAIL;
1629	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1630	__set_mac_addr(dev);
1631
1632	return 0;
1633}
1634
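/* Names reported by "ethtool -S"; the order must match the values written by
   get_ethtool_stats() below. */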
1635static const struct {
1636	const char name[ETH_GSTRING_LEN];
1637} sundance_stats[] = {
1638	{ "tx_multiple_collisions" },
1639	{ "tx_single_collisions" },
1640	{ "tx_late_collisions" },
1641	{ "tx_deferred" },
1642	{ "tx_deferred_excessive" },
1643	{ "tx_aborted" },
1644	{ "tx_bcasts" },
1645	{ "rx_bcasts" },
1646	{ "tx_mcasts" },
1647	{ "rx_mcasts" },
1648};
1649
1650static int check_if_running(struct net_device *dev)
1651{
1652	if (!netif_running(dev))
1653		return -EINVAL;
1654	return 0;
1655}
1656
1657static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1658{
1659	struct netdev_private *np = netdev_priv(dev);
1660	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1661	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1662	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1663}
1664
1665static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1666{
1667	struct netdev_private *np = netdev_priv(dev);
1668	spin_lock_irq(&np->lock);
1669	mii_ethtool_gset(&np->mii_if, ecmd);
1670	spin_unlock_irq(&np->lock);
1671	return 0;
1672}
1673
1674static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1675{
1676	struct netdev_private *np = netdev_priv(dev);
1677	int res;
1678	spin_lock_irq(&np->lock);
1679	res = mii_ethtool_sset(&np->mii_if, ecmd);
1680	spin_unlock_irq(&np->lock);
1681	return res;
1682}
1683
1684static int nway_reset(struct net_device *dev)
1685{
1686	struct netdev_private *np = netdev_priv(dev);
1687	return mii_nway_restart(&np->mii_if);
1688}
1689
1690static u32 get_link(struct net_device *dev)
1691{
1692	struct netdev_private *np = netdev_priv(dev);
1693	return mii_link_ok(&np->mii_if);
1694}
1695
1696static u32 get_msglevel(struct net_device *dev)
1697{
1698	struct netdev_private *np = netdev_priv(dev);
1699	return np->msg_enable;
1700}
1701
1702static void set_msglevel(struct net_device *dev, u32 val)
1703{
1704	struct netdev_private *np = netdev_priv(dev);
1705	np->msg_enable = val;
1706}
1707
1708static void get_strings(struct net_device *dev, u32 stringset,
1709		u8 *data)
1710{
1711	if (stringset == ETH_SS_STATS)
1712		memcpy(data, sundance_stats, sizeof(sundance_stats));
1713}
1714
1715static int get_sset_count(struct net_device *dev, int sset)
1716{
1717	switch (sset) {
1718	case ETH_SS_STATS:
1719		return ARRAY_SIZE(sundance_stats);
1720	default:
1721		return -EOPNOTSUPP;
1722	}
1723}
1724
1725static void get_ethtool_stats(struct net_device *dev,
1726		struct ethtool_stats *stats, u64 *data)
1727{
1728	struct netdev_private *np = netdev_priv(dev);
1729	int i = 0;
1730
1731	get_stats(dev);
1732	data[i++] = np->xstats.tx_multiple_collisions;
1733	data[i++] = np->xstats.tx_single_collisions;
1734	data[i++] = np->xstats.tx_late_collisions;
1735	data[i++] = np->xstats.tx_deferred;
1736	data[i++] = np->xstats.tx_deferred_excessive;
1737	data[i++] = np->xstats.tx_aborted;
1738	data[i++] = np->xstats.tx_bcasts;
1739	data[i++] = np->xstats.rx_bcasts;
1740	data[i++] = np->xstats.tx_mcasts;
1741	data[i++] = np->xstats.rx_mcasts;
1742}
1743
1744#ifdef CONFIG_PM
1745
1746static void sundance_get_wol(struct net_device *dev,
1747		struct ethtool_wolinfo *wol)
1748{
1749	struct netdev_private *np = netdev_priv(dev);
1750	void __iomem *ioaddr = np->base;
1751	u8 wol_bits;
1752
1753	wol->wolopts = 0;
1754
1755	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1756	if (!np->wol_enabled)
1757		return;
1758
1759	wol_bits = ioread8(ioaddr + WakeEvent);
1760	if (wol_bits & MagicPktEnable)
1761		wol->wolopts |= WAKE_MAGIC;
1762	if (wol_bits & LinkEventEnable)
1763		wol->wolopts |= WAKE_PHY;
1764}
1765
1766static int sundance_set_wol(struct net_device *dev,
1767	struct ethtool_wolinfo *wol)
1768{
1769	struct netdev_private *np = netdev_priv(dev);
1770	void __iomem *ioaddr = np->base;
1771	u8 wol_bits;
1772
1773	if (!device_can_wakeup(&np->pci_dev->dev))
1774		return -EOPNOTSUPP;
1775
1776	np->wol_enabled = !!(wol->wolopts);
1777	wol_bits = ioread8(ioaddr + WakeEvent);
1778	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1779			LinkEventEnable | WolEnable);
1780
1781	if (np->wol_enabled) {
1782		if (wol->wolopts & WAKE_MAGIC)
1783			wol_bits |= (MagicPktEnable | WolEnable);
1784		if (wol->wolopts & WAKE_PHY)
1785			wol_bits |= (LinkEventEnable | WolEnable);
1786	}
1787	iowrite8(wol_bits, ioaddr + WakeEvent);
1788
1789	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1790
1791	return 0;
1792}
1793#else
1794#define sundance_get_wol NULL
1795#define sundance_set_wol NULL
1796#endif /* CONFIG_PM */
1797
1798static const struct ethtool_ops ethtool_ops = {
1799	.begin = check_if_running,
1800	.get_drvinfo = get_drvinfo,
1801	.get_settings = get_settings,
1802	.set_settings = set_settings,
1803	.nway_reset = nway_reset,
1804	.get_link = get_link,
1805	.get_wol = sundance_get_wol,
1806	.set_wol = sundance_set_wol,
1807	.get_msglevel = get_msglevel,
1808	.set_msglevel = set_msglevel,
1809	.get_strings = get_strings,
1810	.get_sset_count = get_sset_count,
1811	.get_ethtool_stats = get_ethtool_stats,
1812};
1813
1814static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1815{
1816	struct netdev_private *np = netdev_priv(dev);
1817	int rc;
1818
1819	if (!netif_running(dev))
1820		return -EINVAL;
1821
1822	spin_lock_irq(&np->lock);
1823	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1824	spin_unlock_irq(&np->lock);
1825
1826	return rc;
1827}
1828
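/* Bring the interface down: kill the Rx/Tx tasklets, stop the queue, mask
   interrupts, halt DMA and the MAC, issue a global reset, then free the IRQ,
   the timer and every Rx/Tx buffer still on the rings. */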
1829static int netdev_close(struct net_device *dev)
1830{
1831	struct netdev_private *np = netdev_priv(dev);
1832	void __iomem *ioaddr = np->base;
1833	struct sk_buff *skb;
1834	int i;
1835
1836	/* Wait and kill tasklet */
1837	tasklet_kill(&np->rx_tasklet);
1838	tasklet_kill(&np->tx_tasklet);
1839	np->cur_tx = 0;
1840	np->dirty_tx = 0;
1841	np->cur_task = 0;
1842	np->last_tx = NULL;
1843
1844	netif_stop_queue(dev);
1845
1846	if (netif_msg_ifdown(np)) {
1847		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1848			   "Rx %4.4x Int %2.2x.\n",
1849			   dev->name, ioread8(ioaddr + TxStatus),
1850			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1851		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1852			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1853	}
1854
1855	/* Disable interrupts by clearing the interrupt mask. */
1856	iowrite16(0x0000, ioaddr + IntrEnable);
1857
1858	/* Disable Rx and Tx DMA so resources can be released safely. */
1859	iowrite32(0x500, ioaddr + DMACtrl);
1860
1861	/* Stop the chip's Tx and Rx processes. */
1862	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1863
1864	for (i = 2000; i > 0; i--) {
1865		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1866			break;
1867		mdelay(1);
1868	}
1869
1870	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1871			ioaddr + ASIC_HI_WORD(ASICCtrl));
1872
1873	for (i = 2000; i > 0; i--) {
1874		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1875			break;
1876		mdelay(1);
1877	}
1878
1879#ifdef __i386__
1880	if (netif_msg_hw(np)) {
1881		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1882			   (int)(np->tx_ring_dma));
1883		for (i = 0; i < TX_RING_SIZE; i++)
1884			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1885				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1886				   np->tx_ring[i].frag[0].length);
1887		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1888			   (int)(np->rx_ring_dma));
1889		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1890			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1891				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1892				   np->rx_ring[i].frag[0].length);
1893		}
1894	}
1895#endif /* __i386__ debugging only */
1896
1897	free_irq(np->pci_dev->irq, dev);
1898
1899	del_timer_sync(&np->timer);
1900
1901	/* Free all the skbuffs in the Rx queue. */
1902	for (i = 0; i < RX_RING_SIZE; i++) {
1903		np->rx_ring[i].status = 0;
1904		skb = np->rx_skbuff[i];
1905		if (skb) {
1906			dma_unmap_single(&np->pci_dev->dev,
1907				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1908				np->rx_buf_sz, DMA_FROM_DEVICE);
1909			dev_kfree_skb(skb);
1910			np->rx_skbuff[i] = NULL;
1911		}
1912		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1913	}
1914	for (i = 0; i < TX_RING_SIZE; i++) {
1915		np->tx_ring[i].next_desc = 0;
1916		skb = np->tx_skbuff[i];
1917		if (skb) {
1918			dma_unmap_single(&np->pci_dev->dev,
1919				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1920				skb->len, DMA_TO_DEVICE);
1921			dev_kfree_skb(skb);
1922			np->tx_skbuff[i] = NULL;
1923		}
1924	}
1925
1926	return 0;
1927}
1928
1929static void sundance_remove1(struct pci_dev *pdev)
1930{
1931	struct net_device *dev = pci_get_drvdata(pdev);
1932
1933	if (dev) {
1934	    struct netdev_private *np = netdev_priv(dev);
1935	    unregister_netdev(dev);
1936	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1937		    np->rx_ring, np->rx_ring_dma);
1938	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1939		    np->tx_ring, np->tx_ring_dma);
1940	    pci_iounmap(pdev, np->base);
1941	    pci_release_regions(pdev);
1942	    free_netdev(dev);
1943	}
1944}
1945
1946#ifdef CONFIG_PM
1947
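/* Legacy PCI power-management hooks: suspend closes the device (leaving the
   receiver enabled if Wake-on-LAN is armed) and resume restores PCI state and
   reopens it. */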
1948static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1949{
1950	struct net_device *dev = pci_get_drvdata(pci_dev);
1951	struct netdev_private *np = netdev_priv(dev);
1952	void __iomem *ioaddr = np->base;
1953
1954	if (!netif_running(dev))
1955		return 0;
1956
1957	netdev_close(dev);
1958	netif_device_detach(dev);
1959
1960	pci_save_state(pci_dev);
1961	if (np->wol_enabled) {
1962		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1963		iowrite16(RxEnable, ioaddr + MACCtrl1);
1964	}
1965	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1966			np->wol_enabled);
1967	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1968
1969	return 0;
1970}
1971
1972static int sundance_resume(struct pci_dev *pci_dev)
1973{
1974	struct net_device *dev = pci_get_drvdata(pci_dev);
1975	int err = 0;
1976
1977	if (!netif_running(dev))
1978		return 0;
1979
1980	pci_set_power_state(pci_dev, PCI_D0);
1981	pci_restore_state(pci_dev);
1982	pci_enable_wake(pci_dev, PCI_D0, 0);
1983
1984	err = netdev_open(dev);
1985	if (err) {
1986		printk(KERN_ERR "%s: Can't resume interface!\n",
1987				dev->name);
1988		goto out;
1989	}
1990
1991	netif_device_attach(dev);
1992
1993out:
1994	return err;
1995}
1996
1997#endif /* CONFIG_PM */
1998
1999static struct pci_driver sundance_driver = {
2000	.name		= DRV_NAME,
2001	.id_table	= sundance_pci_tbl,
2002	.probe		= sundance_probe1,
2003	.remove		= sundance_remove1,
2004#ifdef CONFIG_PM
2005	.suspend	= sundance_suspend,
2006	.resume		= sundance_resume,
2007#endif /* CONFIG_PM */
2008};
2009
2010static int __init sundance_init(void)
2011{
2012/* when a module, this is printed whether or not devices are found in probe */
2013#ifdef MODULE
2014	printk(version);
2015#endif
2016	return pci_register_driver(&sundance_driver);
2017}
2018
2019static void __exit sundance_exit(void)
2020{
2021	pci_unregister_driver(&sundance_driver);
2022}
2023
2024module_init(sundance_init);
2025module_exit(sundance_exit);
2026
2027