   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	http://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26#define DRV_VERSION	"1.2"
  27#define DRV_RELDATE	"11-Sep-2006"
  28
  29
  30/* The user-configurable values.
  31   These may be modified when a driver module is loaded.*/
  32static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  34   Typically this is a 64-element hash table based on the Ethernet CRC.  */
  35static const int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  38   Setting to > 1518 effectively disables this feature.
  39   This chip can receive into offset buffers, so the Alpha does not
  40   need a copy-align. */
  41static int rx_copybreak;
  42static int flowctrl=1;
  43
  44/* media[] specifies the media type the NIC operates at.
  45		 autosense	Autosensing active media.
  46		 10mbps_hd 	10Mbps half duplex.
  47		 10mbps_fd 	10Mbps full duplex.
  48		 100mbps_hd 	100Mbps half duplex.
  49		 100mbps_fd 	100Mbps full duplex.
  50		 0		Autosensing active media.
  51		 1	 	10Mbps half duplex.
  52		 2	 	10Mbps full duplex.
  53		 3	 	100Mbps half duplex.
  54		 4	 	100Mbps full duplex.
  55*/
  56#define MAX_UNITS 8
  57static char *media[MAX_UNITS];
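/*
 * Example only: the parameter values below are arbitrary, but the names
 * match the module_param()/module_param_array() declarations further down.
 *
 *	modprobe sundance debug=2 flowctrl=1 media=100mbps_fd,autosense
 */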
  58
  59
  60/* Operational parameters that are set at compile time. */
  61
  62/* Keep the ring sizes a power of two for compile efficiency.
  63   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  64   Making the Tx ring too large decreases the effectiveness of channel
  65   bonding and packet priority, and more than 128 requires modifying the
  66   Tx error recovery.
  67   Large receive rings merely waste memory. */
  68#define TX_RING_SIZE	32
  69#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  70#define RX_RING_SIZE	64
  71#define RX_BUDGET	32
  72#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
  73#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
  74
  75/* Operational parameters that usually are not changed. */
  76/* Time in jiffies before concluding the transmitter is hung. */
  77#define TX_TIMEOUT  (4*HZ)
  78#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  79
  80/* Include files, designed to support most kernel versions 2.0.0 and later. */
  81#include <linux/module.h>
  82#include <linux/kernel.h>
  83#include <linux/string.h>
  84#include <linux/timer.h>
  85#include <linux/errno.h>
  86#include <linux/ioport.h>
  87#include <linux/interrupt.h>
  88#include <linux/pci.h>
  89#include <linux/netdevice.h>
  90#include <linux/etherdevice.h>
  91#include <linux/skbuff.h>
  92#include <linux/init.h>
  93#include <linux/bitops.h>
  94#include <linux/uaccess.h>
  95#include <asm/processor.h>		/* Processor type for cache alignment. */
  96#include <asm/io.h>
  97#include <linux/delay.h>
  98#include <linux/spinlock.h>
  99#include <linux/dma-mapping.h>
 100#include <linux/crc32.h>
 101#include <linux/ethtool.h>
 102#include <linux/mii.h>
 103
 104/* These identify the driver base version and may not be removed. */
 105static const char version[] =
 106	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
 107	" Written by Donald Becker\n";
 108
 109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 111MODULE_LICENSE("GPL");
 112
 113module_param(debug, int, 0);
 114module_param(rx_copybreak, int, 0);
 115module_param_array(media, charp, NULL, 0);
 116module_param(flowctrl, int, 0);
 117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 120
 121/*
 122				Theory of Operation
 123
 124I. Board Compatibility
 125
 126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 127
 128II. Board-specific settings
 129
 130III. Driver operation
 131
 132IIIa. Ring buffers
 133
 134This driver uses two statically allocated fixed-size descriptor lists
 135formed into rings by a branch from the final descriptor to the beginning of
 136the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 137Some chips explicitly use only 2^N sized rings, while others use a
 138'next descriptor' pointer that the driver forms into rings.
 139
 140IIIb/c. Transmit/Receive Structure
 141
 142This driver uses a zero-copy receive and transmit scheme.
 143The driver allocates full frame size skbuffs for the Rx ring buffers at
 144open() time and passes the skb->data field to the chip as receive data
 145buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 146a fresh skbuff is allocated and the frame is copied to the new skbuff.
 147When the incoming frame is larger, the skbuff is passed directly up the
 148protocol stack.  Buffers consumed this way are replaced by newly allocated
 149skbuffs in a later phase of receives.
 150
 151The RX_COPYBREAK value is chosen to trade off the memory wasted by
 152using a full-sized skbuff for small frames vs. the copying costs of larger
 153frames.  New boards are typically used in generously configured machines
 154and the underfilled buffers have negligible impact compared to the benefit of
 155a single allocation size, so the default value of zero results in never
 156copying packets.  When copying is done, the cost is usually mitigated by using
 157a combined copy/checksum routine.  Copying also preloads the cache, which is
 158most useful with small frames.
 159
 160A subtle aspect of the operation is that the IP header at offset 14 in an
 161ethernet frame isn't longword aligned for further processing.
 162Unaligned buffers are permitted by the Sundance hardware, so
 163frames are received into the skbuff at an offset of "+2", 16-byte aligning
 164the IP header.
 165
 166IIId. Synchronization
 167
 168The driver runs as two independent, single-threaded flows of control.  One
 169is the send-packet routine, which enforces single-threaded use by the
 170dev->tbusy flag.  The other thread is the interrupt handler, which is single
 171threaded by the hardware and interrupt handling software.
 172
 173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 174flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 176the 'lp->tx_full' flag.
 177
 178The interrupt handler has exclusive control over the Rx ring and records stats
 179from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 181clears both the tx_full and tbusy flags.
 182
 183IV. Notes
 184
 185IVb. References
 186
 187The Sundance ST201 datasheet, preliminary version.
 188The Kendin KS8723 datasheet, preliminary version.
 189The ICplus IP100 datasheet, preliminary version.
 190http://www.scyld.com/expert/100mbps.html
 191http://www.scyld.com/expert/NWay.html
 192
 193IVc. Errata
 194
 195*/
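/*
 * Illustrative sketch of the copybreak decision described in IIIb/c above;
 * the real receive path is rx_poll() further down, which wraps this test
 * with the DMA sync/unmap calls and ring bookkeeping:
 *
 *	if (pkt_len < rx_copybreak &&
 *	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
 *		skb_reserve(skb, 2);	// 16-byte align the IP header
 *		// copy the frame into the small skb; the full-sized
 *		// ring buffer stays in place for reuse
 *	} else {
 *		// hand the full-sized ring skb straight up the stack;
 *		// refill_rx() allocates a replacement later
 *	}
 */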
 196
 197/* Work-around for Kendin chip bugs. */
 198#ifndef CONFIG_SUNDANCE_MMIO
 199#define USE_IO_OPS 1
 200#endif
 201
 202static const struct pci_device_id sundance_pci_tbl[] = {
 203	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 204	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 205	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 206	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 207	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 208	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 209	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 210	{ }
 211};
 212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 213
 214enum {
 215	netdev_io_size = 128
 216};
 217
 218struct pci_id_info {
 219	const char *name;
 220};
 221static const struct pci_id_info pci_id_tbl[] = {
 222	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 223	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 224	{"D-Link DFE-580TX 4 port Server Adapter"},
 225	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 226	{"D-Link DL10050-based FAST Ethernet Adapter"},
 227	{"Sundance Technology Alta"},
 228	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229	{ }	/* terminate list. */
 230};
 231
 232/* This driver was written to use PCI memory space, however x86-oriented
 233   hardware often uses I/O space accesses. */
 234
 235/* Offsets to the device registers.
 236   Unlike software-only systems, device drivers interact with complex hardware.
 237   It's not useful to define symbolic names for every register bit in the
 238   device.  The name can only partially document the semantics and make
 239   the driver longer and more difficult to read.
 240   In general, only the important configuration values or bits changed
 241   multiple times should be defined symbolically.
 242*/
 243enum alta_offsets {
 244	DMACtrl = 0x00,
 245	TxListPtr = 0x04,
 246	TxDMABurstThresh = 0x08,
 247	TxDMAUrgentThresh = 0x09,
 248	TxDMAPollPeriod = 0x0a,
 249	RxDMAStatus = 0x0c,
 250	RxListPtr = 0x10,
 251	DebugCtrl0 = 0x1a,
 252	DebugCtrl1 = 0x1c,
 253	RxDMABurstThresh = 0x14,
 254	RxDMAUrgentThresh = 0x15,
 255	RxDMAPollPeriod = 0x16,
 256	LEDCtrl = 0x1a,
 257	ASICCtrl = 0x30,
 258	EEData = 0x34,
 259	EECtrl = 0x36,
 260	FlashAddr = 0x40,
 261	FlashData = 0x44,
 262	WakeEvent = 0x45,
 263	TxStatus = 0x46,
 264	TxFrameId = 0x47,
 265	DownCounter = 0x18,
 266	IntrClear = 0x4a,
 267	IntrEnable = 0x4c,
 268	IntrStatus = 0x4e,
 269	MACCtrl0 = 0x50,
 270	MACCtrl1 = 0x52,
 271	StationAddr = 0x54,
 272	MaxFrameSize = 0x5A,
 273	RxMode = 0x5c,
 274	MIICtrl = 0x5e,
 275	MulticastFilter0 = 0x60,
 276	MulticastFilter1 = 0x64,
 277	RxOctetsLow = 0x68,
 278	RxOctetsHigh = 0x6a,
 279	TxOctetsLow = 0x6c,
 280	TxOctetsHigh = 0x6e,
 281	TxFramesOK = 0x70,
 282	RxFramesOK = 0x72,
 283	StatsCarrierError = 0x74,
 284	StatsLateColl = 0x75,
 285	StatsMultiColl = 0x76,
 286	StatsOneColl = 0x77,
 287	StatsTxDefer = 0x78,
 288	RxMissed = 0x79,
 289	StatsTxXSDefer = 0x7a,
 290	StatsTxAbort = 0x7b,
 291	StatsBcastTx = 0x7c,
 292	StatsBcastRx = 0x7d,
 293	StatsMcastTx = 0x7e,
 294	StatsMcastRx = 0x7f,
 295	/* Aliased and bogus values! */
 296	RxStatus = 0x0c,
 297};
 298
 299#define ASIC_HI_WORD(x)	((x) + 2)
 300
 301enum ASICCtrl_HiWord_bit {
 302	GlobalReset = 0x0001,
 303	RxReset = 0x0002,
 304	TxReset = 0x0004,
 305	DMAReset = 0x0008,
 306	FIFOReset = 0x0010,
 307	NetworkReset = 0x0020,
 308	HostReset = 0x0040,
 309	ResetBusy = 0x0400,
 310};
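/*
 * These bits live in the high 16 bits of the 32-bit ASICCtrl register, so
 * callers shift them up before passing them to sundance_reset(); for
 * example, the global reset used at probe/open time and the Tx recovery
 * in reset_tx() below:
 *
 *	sundance_reset(dev, 0x00ff << 16);
 *	sundance_reset(dev, (NetworkReset | FIFOReset | DMAReset | TxReset) << 16);
 */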
 311
 312/* Bits in the interrupt status/mask registers. */
 313enum intr_status_bits {
 314	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 315	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 316	IntrDrvRqst=0x0040,
 317	StatsMax=0x0080, LinkChange=0x0100,
 318	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 319};
 320
 321/* Bits in the RxMode register. */
 322enum rx_mode_bits {
 323	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 324	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 325};
 326/* Bits in MACCtrl. */
 327enum mac_ctrl0_bits {
 328	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 329	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 330};
 331enum mac_ctrl1_bits {
 332	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 333	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 334	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 335};
 336
 337/* Bits in WakeEvent register. */
 338enum wake_event_bits {
 339	WakePktEnable = 0x01,
 340	MagicPktEnable = 0x02,
 341	LinkEventEnable = 0x04,
 342	WolEnable = 0x80,
 343};
 344
 345/* The Rx and Tx buffer descriptors. */
 346/* Note that using only 32 bit fields simplifies conversion to big-endian
 347   architectures. */
 348struct netdev_desc {
 349	__le32 next_desc;
 350	__le32 status;
 351	struct desc_frag { __le32 addr, length; } frag[1];
 352};
 353
 354/* Bits in netdev_desc.status */
 355enum desc_status_bits {
 356	DescOwn=0x8000,
 357	DescEndPacket=0x4000,
 358	DescEndRing=0x2000,
 359	LastFrag=0x80000000,
 360	DescIntrOnTx=0x8000,
 361	DescIntrOnDMADone=0x80000000,
 362	DisableAlign = 0x00000001,
 363};
 364
 365#define PRIV_ALIGN	15 	/* Required alignment mask */
 366/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 367   within the structure. */
 368#define MII_CNT		4
 369struct netdev_private {
 370	/* Descriptor rings first for alignment. */
 371	struct netdev_desc *rx_ring;
 372	struct netdev_desc *tx_ring;
 373	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 374	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 375	dma_addr_t tx_ring_dma;
 376	dma_addr_t rx_ring_dma;
 377	struct timer_list timer;		/* Media monitoring timer. */
 378	/* ethtool extra stats */
 379	struct {
 380		u64 tx_multiple_collisions;
 381		u64 tx_single_collisions;
 382		u64 tx_late_collisions;
 383		u64 tx_deferred;
 384		u64 tx_deferred_excessive;
 385		u64 tx_aborted;
 386		u64 tx_bcasts;
 387		u64 rx_bcasts;
 388		u64 tx_mcasts;
 389		u64 rx_mcasts;
 390	} xstats;
 391	/* Frequently used values: keep some adjacent for cache effect. */
 392	spinlock_t lock;
 393	int msg_enable;
 394	int chip_id;
 395	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 396	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 397	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 398	unsigned int cur_tx, dirty_tx;
 399	/* These values keep track of the transceiver/media in use. */
 400	unsigned int flowctrl:1;
 401	unsigned int default_port:4;		/* Last dev->if_port value. */
 402	unsigned int an_enable:1;
 403	unsigned int speed;
 404	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
 405	struct tasklet_struct rx_tasklet;
 406	struct tasklet_struct tx_tasklet;
 407	int budget;
 408	int cur_task;
 409	/* Multicast and receive mode. */
 410	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 411	u16 mcast_filter[4];
 412	/* MII transceiver section. */
 413	struct mii_if_info mii_if;
 414	int mii_preamble_required;
 415	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 416	struct pci_dev *pci_dev;
 417	void __iomem *base;
 418	spinlock_t statlock;
 419};
 420
 421/* The station address location in the EEPROM. */
 422#define EEPROM_SA_OFFSET	0x10
 423#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 424			IntrDrvRqst | IntrTxDone | StatsMax | \
 425			LinkChange)
 426
 427static int  change_mtu(struct net_device *dev, int new_mtu);
 428static int  eeprom_read(void __iomem *ioaddr, int location);
 429static int  mdio_read(struct net_device *dev, int phy_id, int location);
 430static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 431static int  mdio_wait_link(struct net_device *dev, int wait);
 432static int  netdev_open(struct net_device *dev);
 433static void check_duplex(struct net_device *dev);
 434static void netdev_timer(struct timer_list *t);
 435static void tx_timeout(struct net_device *dev);
 436static void init_ring(struct net_device *dev);
 437static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 438static int reset_tx (struct net_device *dev);
 439static irqreturn_t intr_handler(int irq, void *dev_instance);
 440static void rx_poll(unsigned long data);
 441static void tx_poll(unsigned long data);
 442static void refill_rx (struct net_device *dev);
 443static void netdev_error(struct net_device *dev, int intr_status);
 445static void set_rx_mode(struct net_device *dev);
 446static int __set_mac_addr(struct net_device *dev);
 447static int sundance_set_mac_addr(struct net_device *dev, void *data);
 448static struct net_device_stats *get_stats(struct net_device *dev);
 449static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 450static int  netdev_close(struct net_device *dev);
 451static const struct ethtool_ops ethtool_ops;
 452
 453static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 454{
 455	struct netdev_private *np = netdev_priv(dev);
 456	void __iomem *ioaddr = np->base + ASICCtrl;
 457	int countdown;
 458
 459	/* ST201 documentation states ASICCtrl is a 32-bit register */
 460	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 461	/* ST201 documentation states reset can take up to 1 ms */
 462	countdown = 10 + 1;
 463	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 464		if (--countdown == 0) {
 465			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
 466			break;
 467		}
 468		udelay(100);
 469	}
 470}
 471
 472#ifdef CONFIG_NET_POLL_CONTROLLER
 473static void sundance_poll_controller(struct net_device *dev)
 474{
 475	struct netdev_private *np = netdev_priv(dev);
 476
 477	disable_irq(np->pci_dev->irq);
 478	intr_handler(np->pci_dev->irq, dev);
 479	enable_irq(np->pci_dev->irq);
 480}
 481#endif
 482
 483static const struct net_device_ops netdev_ops = {
 484	.ndo_open		= netdev_open,
 485	.ndo_stop		= netdev_close,
 486	.ndo_start_xmit		= start_tx,
 487	.ndo_get_stats 		= get_stats,
 488	.ndo_set_rx_mode	= set_rx_mode,
 489	.ndo_do_ioctl 		= netdev_ioctl,
 490	.ndo_tx_timeout		= tx_timeout,
 491	.ndo_change_mtu		= change_mtu,
 492	.ndo_set_mac_address 	= sundance_set_mac_addr,
 493	.ndo_validate_addr	= eth_validate_addr,
 494#ifdef CONFIG_NET_POLL_CONTROLLER
 495	.ndo_poll_controller 	= sundance_poll_controller,
 496#endif
 497};
 498
 499static int sundance_probe1(struct pci_dev *pdev,
 500			   const struct pci_device_id *ent)
 501{
 502	struct net_device *dev;
 503	struct netdev_private *np;
 504	static int card_idx;
 505	int chip_idx = ent->driver_data;
 506	int irq;
 507	int i;
 508	void __iomem *ioaddr;
 509	u16 mii_ctl;
 510	void *ring_space;
 511	dma_addr_t ring_dma;
 512#ifdef USE_IO_OPS
 513	int bar = 0;
 514#else
 515	int bar = 1;
 516#endif
 517	int phy, phy_end, phy_idx = 0;
 518
 519/* when built into the kernel, we only print version if device is found */
 520#ifndef MODULE
 521	static int printed_version;
 522	if (!printed_version++)
 523		printk(version);
 524#endif
 525
 526	if (pci_enable_device(pdev))
 527		return -EIO;
 528	pci_set_master(pdev);
 529
 530	irq = pdev->irq;
 531
 532	dev = alloc_etherdev(sizeof(*np));
 533	if (!dev)
 534		return -ENOMEM;
 535	SET_NETDEV_DEV(dev, &pdev->dev);
 536
 537	if (pci_request_regions(pdev, DRV_NAME))
 538		goto err_out_netdev;
 539
 540	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 541	if (!ioaddr)
 542		goto err_out_res;
 543
 544	for (i = 0; i < 3; i++)
 545		((__le16 *)dev->dev_addr)[i] =
 546			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 547
 548	np = netdev_priv(dev);
 549	np->base = ioaddr;
 550	np->pci_dev = pdev;
 551	np->chip_id = chip_idx;
 552	np->msg_enable = (1 << debug) - 1;
 553	spin_lock_init(&np->lock);
 554	spin_lock_init(&np->statlock);
 555	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 556	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 557
 558	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 559			&ring_dma, GFP_KERNEL);
 560	if (!ring_space)
 561		goto err_out_cleardev;
 562	np->tx_ring = (struct netdev_desc *)ring_space;
 563	np->tx_ring_dma = ring_dma;
 564
 565	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 566			&ring_dma, GFP_KERNEL);
 567	if (!ring_space)
 568		goto err_out_unmap_tx;
 569	np->rx_ring = (struct netdev_desc *)ring_space;
 570	np->rx_ring_dma = ring_dma;
 571
 572	np->mii_if.dev = dev;
 573	np->mii_if.mdio_read = mdio_read;
 574	np->mii_if.mdio_write = mdio_write;
 575	np->mii_if.phy_id_mask = 0x1f;
 576	np->mii_if.reg_num_mask = 0x1f;
 577
 578	/* The chip-specific entries in the device structure. */
 579	dev->netdev_ops = &netdev_ops;
 580	dev->ethtool_ops = &ethtool_ops;
 581	dev->watchdog_timeo = TX_TIMEOUT;
 582
 583	/* MTU range: 68 - 8191 */
 584	dev->min_mtu = ETH_MIN_MTU;
 585	dev->max_mtu = 8191;
 586
 587	pci_set_drvdata(pdev, dev);
 588
 589	i = register_netdev(dev);
 590	if (i)
 591		goto err_out_unmap_rx;
 592
 593	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 594	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 595	       dev->dev_addr, irq);
 596
 597	np->phys[0] = 1;		/* Default setting */
 598	np->mii_preamble_required++;
 599
 600	/*
 601	 * It seems some PHYs don't deal well with address 0 being accessed
 602	 * first.
 603	 */
 604	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 605		phy = 0;
 606		phy_end = 31;
 607	} else {
 608		phy = 1;
 609		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 610	}
 611	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 612		int phyx = phy & 0x1f;
 613		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 614		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 615			np->phys[phy_idx++] = phyx;
 616			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 617			if ((mii_status & 0x0040) == 0)
 618				np->mii_preamble_required++;
 619			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 620				   "0x%4.4x advertising %4.4x.\n",
 621				   dev->name, phyx, mii_status, np->mii_if.advertising);
 622		}
 623	}
 624	np->mii_preamble_required--;
 625
 626	if (phy_idx == 0) {
 627		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 628			   dev->name, ioread32(ioaddr + ASICCtrl));
 629		goto err_out_unregister;
 630	}
 631
 632	np->mii_if.phy_id = np->phys[0];
 633
 634	/* Parse override configuration */
 635	np->an_enable = 1;
 636	if (card_idx < MAX_UNITS) {
 637		if (media[card_idx] != NULL) {
 638			np->an_enable = 0;
 639			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 640			    strcmp (media[card_idx], "4") == 0) {
 641				np->speed = 100;
 642				np->mii_if.full_duplex = 1;
 643			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 644				   strcmp (media[card_idx], "3") == 0) {
 645				np->speed = 100;
 646				np->mii_if.full_duplex = 0;
 647			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 648				   strcmp (media[card_idx], "2") == 0) {
 649				np->speed = 10;
 650				np->mii_if.full_duplex = 1;
 651			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 652				   strcmp (media[card_idx], "1") == 0) {
 653				np->speed = 10;
 654				np->mii_if.full_duplex = 0;
 655			} else {
 656				np->an_enable = 1;
 657			}
 658		}
 659		if (flowctrl == 1)
 660			np->flowctrl = 1;
 661	}
 662
 663	/* Fibre PHY? */
 664	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 665		/* Default 100Mbps Full */
 666		if (np->an_enable) {
 667			np->speed = 100;
 668			np->mii_if.full_duplex = 1;
 669			np->an_enable = 0;
 670		}
 671	}
 672	/* Reset PHY */
 673	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 674	mdelay (300);
 675	/* If flow control enabled, we need to advertise it.*/
 676	if (np->flowctrl)
 677		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 678	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 679	/* Force media type */
 680	if (!np->an_enable) {
 681		mii_ctl = 0;
 682		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 683		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 684		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 685		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 686			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 687
 688	}
 689
 690	/* Perhaps move the reset here? */
 691	/* Reset the chip to erase previous misconfiguration. */
 692	if (netif_msg_hw(np))
 693		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 694	sundance_reset(dev, 0x00ff << 16);
 695	if (netif_msg_hw(np))
 696		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 697
 698	card_idx++;
 699	return 0;
 700
 701err_out_unregister:
 702	unregister_netdev(dev);
 703err_out_unmap_rx:
 704	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 705		np->rx_ring, np->rx_ring_dma);
 706err_out_unmap_tx:
 707	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 708		np->tx_ring, np->tx_ring_dma);
 709err_out_cleardev:
 710	pci_iounmap(pdev, ioaddr);
 711err_out_res:
 712	pci_release_regions(pdev);
 713err_out_netdev:
 714	free_netdev (dev);
 715	return -ENODEV;
 716}
 717
 718static int change_mtu(struct net_device *dev, int new_mtu)
 719{
 720	if (netif_running(dev))
 721		return -EBUSY;
 722	dev->mtu = new_mtu;
 723	return 0;
 724}
 725
 726#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 727/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 728static int eeprom_read(void __iomem *ioaddr, int location)
 729{
 730	int boguscnt = 10000;		/* Typical 1900 ticks. */
 731	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 732	do {
 733		eeprom_delay(ioaddr + EECtrl);
 734		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 735			return ioread16(ioaddr + EEData);
 736		}
 737	} while (--boguscnt > 0);
 738	return 0;
 739}
 740
 741/*  MII transceiver control section.
 742	Read and write the MII registers using software-generated serial
 743	MDIO protocol.  See the MII specifications or DP83840A data sheet
 744	for details.
 745
 746	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
 747	met by back-to-back 33 MHz PCI cycles. */
 748#define mdio_delay() ioread8(mdio_addr)
 749
 750enum mii_reg_bits {
 751	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 752};
 753#define MDIO_EnbIn  (0)
 754#define MDIO_WRITE0 (MDIO_EnbOutput)
 755#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 756
 757/* Generate the preamble required for initial synchronization and
 758   a few older transceivers. */
 759static void mdio_sync(void __iomem *mdio_addr)
 760{
 761	int bits = 32;
 762
 763	/* Establish sync by sending at least 32 logic ones. */
 764	while (--bits >= 0) {
 765		iowrite8(MDIO_WRITE1, mdio_addr);
 766		mdio_delay();
 767		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 768		mdio_delay();
 769	}
 770}
 771
 772static int mdio_read(struct net_device *dev, int phy_id, int location)
 773{
 774	struct netdev_private *np = netdev_priv(dev);
 775	void __iomem *mdio_addr = np->base + MIICtrl;
 776	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 777	int i, retval = 0;
 778
 779	if (np->mii_preamble_required)
 780		mdio_sync(mdio_addr);
 781
 782	/* Shift the read command bits out. */
 783	for (i = 15; i >= 0; i--) {
 784		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 785
 786		iowrite8(dataval, mdio_addr);
 787		mdio_delay();
 788		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 789		mdio_delay();
 790	}
 791	/* Read the two transition, 16 data, and wire-idle bits. */
 792	for (i = 19; i > 0; i--) {
 793		iowrite8(MDIO_EnbIn, mdio_addr);
 794		mdio_delay();
 795		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 796		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 797		mdio_delay();
 798	}
 799	return (retval>>1) & 0xffff;
 800}
 801
 802static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 803{
 804	struct netdev_private *np = netdev_priv(dev);
 805	void __iomem *mdio_addr = np->base + MIICtrl;
 806	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 807	int i;
 808
 809	if (np->mii_preamble_required)
 810		mdio_sync(mdio_addr);
 811
 812	/* Shift the command bits out. */
 813	for (i = 31; i >= 0; i--) {
 814		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 815
 816		iowrite8(dataval, mdio_addr);
 817		mdio_delay();
 818		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 819		mdio_delay();
 820	}
 821	/* Clear out extra bits. */
 822	for (i = 2; i > 0; i--) {
 823		iowrite8(MDIO_EnbIn, mdio_addr);
 824		mdio_delay();
 825		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 826		mdio_delay();
 827	}
 828}
 829
 830static int mdio_wait_link(struct net_device *dev, int wait)
 831{
 832	int bmsr;
 833	int phy_id;
 834	struct netdev_private *np;
 835
 836	np = netdev_priv(dev);
 837	phy_id = np->phys[0];
 838
 839	do {
 840		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 841		if (bmsr & 0x0004)
 842			return 0;
 843		mdelay(1);
 844	} while (--wait > 0);
 845	return -1;
 846}
 847
 848static int netdev_open(struct net_device *dev)
 849{
 850	struct netdev_private *np = netdev_priv(dev);
 851	void __iomem *ioaddr = np->base;
 852	const int irq = np->pci_dev->irq;
 853	unsigned long flags;
 854	int i;
 855
 856	sundance_reset(dev, 0x00ff << 16);
 857
 858	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 859	if (i)
 860		return i;
 861
 862	if (netif_msg_ifup(np))
 863		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 864
 865	init_ring(dev);
 866
 867	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 868	/* The Tx list pointer is written as packets are queued. */
 869
 870	/* Initialize other registers. */
 871	__set_mac_addr(dev);
 872#if IS_ENABLED(CONFIG_VLAN_8021Q)
 873	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 874#else
 875	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 876#endif
 877	if (dev->mtu > 2047)
 878		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 879
 880	/* Configure the PCI bus bursts and FIFO thresholds. */
 881
 882	if (dev->if_port == 0)
 883		dev->if_port = np->default_port;
 884
 885	spin_lock_init(&np->mcastlock);
 886
 887	set_rx_mode(dev);
 888	iowrite16(0, ioaddr + IntrEnable);
 889	iowrite16(0, ioaddr + DownCounter);
 890	/* Set the chip to poll every N*320nsec. */
 891	iowrite8(100, ioaddr + RxDMAPollPeriod);
 892	iowrite8(127, ioaddr + TxDMAPollPeriod);
 893	/* Fix DFE-580TX packet drop issue */
 894	if (np->pci_dev->revision >= 0x14)
 895		iowrite8(0x01, ioaddr + DebugCtrl1);
 896	netif_start_queue(dev);
 897
 898	spin_lock_irqsave(&np->lock, flags);
 899	reset_tx(dev);
 900	spin_unlock_irqrestore(&np->lock, flags);
 901
 902	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 903
 904	/* Disable Wol */
 905	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 906	np->wol_enabled = 0;
 907
 908	if (netif_msg_ifup(np))
 909		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 910			   "MAC Control %x, %4.4x %4.4x.\n",
 911			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 912			   ioread32(ioaddr + MACCtrl0),
 913			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 914
 915	/* Set the timer to check for link beat. */
 916	timer_setup(&np->timer, netdev_timer, 0);
 917	np->timer.expires = jiffies + 3*HZ;
 918	add_timer(&np->timer);
 919
 920	/* Enable interrupts by setting the interrupt mask. */
 921	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 922
 923	return 0;
 924}
 925
 926static void check_duplex(struct net_device *dev)
 927{
 928	struct netdev_private *np = netdev_priv(dev);
 929	void __iomem *ioaddr = np->base;
 930	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 931	int negotiated = mii_lpa & np->mii_if.advertising;
 932	int duplex;
 933
 934	/* Force media */
 935	if (!np->an_enable || mii_lpa == 0xffff) {
 936		if (np->mii_if.full_duplex)
 937			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 938				ioaddr + MACCtrl0);
 939		return;
 940	}
 941
 942	/* Autonegotiation */
 943	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 944	if (np->mii_if.full_duplex != duplex) {
 945		np->mii_if.full_duplex = duplex;
 946		if (netif_msg_link(np))
 947			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 948				   "negotiated capability %4.4x.\n", dev->name,
 949				   duplex ? "full" : "half", np->phys[0], negotiated);
 950		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 951	}
 952}
 953
 954static void netdev_timer(struct timer_list *t)
 955{
 956	struct netdev_private *np = from_timer(np, t, timer);
 957	struct net_device *dev = np->mii_if.dev;
 958	void __iomem *ioaddr = np->base;
 959	int next_tick = 10*HZ;
 960
 961	if (netif_msg_timer(np)) {
 962		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 963			   "Tx %x Rx %x.\n",
 964			   dev->name, ioread16(ioaddr + IntrEnable),
 965			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 966	}
 967	check_duplex(dev);
 968	np->timer.expires = jiffies + next_tick;
 969	add_timer(&np->timer);
 970}
 971
 972static void tx_timeout(struct net_device *dev)
 973{
 974	struct netdev_private *np = netdev_priv(dev);
 975	void __iomem *ioaddr = np->base;
 976	unsigned long flag;
 977
 978	netif_stop_queue(dev);
 979	tasklet_disable(&np->tx_tasklet);
 980	iowrite16(0, ioaddr + IntrEnable);
 981	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 982		   "TxFrameId %2.2x,"
 983		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 984		   ioread8(ioaddr + TxFrameId));
 985
 986	{
 987		int i;
 988		for (i=0; i<TX_RING_SIZE; i++) {
 989			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 990				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 991				le32_to_cpu(np->tx_ring[i].next_desc),
 992				le32_to_cpu(np->tx_ring[i].status),
 993				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 994				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 995				le32_to_cpu(np->tx_ring[i].frag[0].length));
 996		}
 997		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 998			ioread32(np->base + TxListPtr),
 999			netif_queue_stopped(dev));
1000		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1001			np->cur_tx, np->cur_tx % TX_RING_SIZE,
1002			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1003		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1004		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1005	}
1006	spin_lock_irqsave(&np->lock, flag);
1007
1008	/* Stop and restart the chip's Tx processes. */
1009	reset_tx(dev);
1010	spin_unlock_irqrestore(&np->lock, flag);
1011
1012	dev->if_port = 0;
1013
1014	netif_trans_update(dev); /* prevent tx timeout */
1015	dev->stats.tx_errors++;
1016	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1017		netif_wake_queue(dev);
1018	}
1019	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1020	tasklet_enable(&np->tx_tasklet);
1021}
1022
1023
1024/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1025static void init_ring(struct net_device *dev)
1026{
1027	struct netdev_private *np = netdev_priv(dev);
1028	int i;
1029
1030	np->cur_rx = np->cur_tx = 0;
1031	np->dirty_rx = np->dirty_tx = 0;
1032	np->cur_task = 0;
1033
1034	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1035
1036	/* Initialize all Rx descriptors. */
1037	for (i = 0; i < RX_RING_SIZE; i++) {
1038		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1039			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1040		np->rx_ring[i].status = 0;
1041		np->rx_ring[i].frag[0].length = 0;
1042		np->rx_skbuff[i] = NULL;
1043	}
1044
1045	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1046	for (i = 0; i < RX_RING_SIZE; i++) {
1047		struct sk_buff *skb =
1048			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1049		np->rx_skbuff[i] = skb;
1050		if (skb == NULL)
1051			break;
1052		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1053		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1054			dma_map_single(&np->pci_dev->dev, skb->data,
1055				np->rx_buf_sz, DMA_FROM_DEVICE));
1056		if (dma_mapping_error(&np->pci_dev->dev,
1057					np->rx_ring[i].frag[0].addr)) {
1058			dev_kfree_skb(skb);
1059			np->rx_skbuff[i] = NULL;
1060			break;
1061		}
1062		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1063	}
1064	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1065
1066	for (i = 0; i < TX_RING_SIZE; i++) {
1067		np->tx_skbuff[i] = NULL;
1068		np->tx_ring[i].status = 0;
1069	}
1070}
1071
1072static void tx_poll (unsigned long data)
1073{
1074	struct net_device *dev = (struct net_device *)data;
1075	struct netdev_private *np = netdev_priv(dev);
1076	unsigned head = np->cur_task % TX_RING_SIZE;
1077	struct netdev_desc *txdesc =
1078		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1079
1080	/* Chain the next pointer */
1081	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1082		int entry = np->cur_task % TX_RING_SIZE;
1083		txdesc = &np->tx_ring[entry];
1084		if (np->last_tx) {
1085			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1086				entry*sizeof(struct netdev_desc));
1087		}
1088		np->last_tx = txdesc;
1089	}
1090	/* Mark the latest descriptor of the Tx ring */
1091	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1092
1093	if (ioread32 (np->base + TxListPtr) == 0)
1094		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1095			np->base + TxListPtr);
1096}
1097
1098static netdev_tx_t
1099start_tx (struct sk_buff *skb, struct net_device *dev)
1100{
1101	struct netdev_private *np = netdev_priv(dev);
1102	struct netdev_desc *txdesc;
1103	unsigned entry;
1104
1105	/* Calculate the next Tx descriptor entry. */
1106	entry = np->cur_tx % TX_RING_SIZE;
1107	np->tx_skbuff[entry] = skb;
1108	txdesc = &np->tx_ring[entry];
1109
1110	txdesc->next_desc = 0;
1111	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1112	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1113				skb->data, skb->len, DMA_TO_DEVICE));
1114	if (dma_mapping_error(&np->pci_dev->dev,
1115				txdesc->frag[0].addr))
1116			goto drop_frame;
1117	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1118
1119	/* Increment cur_tx before tasklet_schedule() */
1120	np->cur_tx++;
1121	mb();
1122	/* Schedule a tx_poll() task */
1123	tasklet_schedule(&np->tx_tasklet);
1124
1125	/* On some architectures: explicitly flush cache lines here. */
1126	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1127	    !netif_queue_stopped(dev)) {
1128		/* do nothing */
1129	} else {
1130		netif_stop_queue (dev);
1131	}
1132	if (netif_msg_tx_queued(np)) {
1133		printk (KERN_DEBUG
1134			"%s: Transmit frame #%d queued in slot %d.\n",
1135			dev->name, np->cur_tx, entry);
1136	}
1137	return NETDEV_TX_OK;
1138
1139drop_frame:
1140	dev_kfree_skb_any(skb);
1141	np->tx_skbuff[entry] = NULL;
1142	dev->stats.tx_dropped++;
1143	return NETDEV_TX_OK;
1144}
1145
1146/* Reset the hardware Tx path and free all Tx buffers */
1147static int
1148reset_tx (struct net_device *dev)
1149{
1150	struct netdev_private *np = netdev_priv(dev);
1151	void __iomem *ioaddr = np->base;
1152	struct sk_buff *skb;
1153	int i;
1154
1155	/* Reset tx logic, TxListPtr will be cleaned */
1156	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1157	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1158
1159	/* free all tx skbuff */
1160	for (i = 0; i < TX_RING_SIZE; i++) {
1161		np->tx_ring[i].next_desc = 0;
1162
1163		skb = np->tx_skbuff[i];
1164		if (skb) {
1165			dma_unmap_single(&np->pci_dev->dev,
1166				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1167				skb->len, DMA_TO_DEVICE);
1168			dev_kfree_skb_any(skb);
1169			np->tx_skbuff[i] = NULL;
1170			dev->stats.tx_dropped++;
1171		}
1172	}
1173	np->cur_tx = np->dirty_tx = 0;
1174	np->cur_task = 0;
1175
1176	np->last_tx = NULL;
1177	iowrite8(127, ioaddr + TxDMAPollPeriod);
1178
1179	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1180	return 0;
1181}
1182
1183/* The interrupt handler cleans up after the Tx thread,
1184   and schedules the Rx thread's work */
1185static irqreturn_t intr_handler(int irq, void *dev_instance)
1186{
1187	struct net_device *dev = (struct net_device *)dev_instance;
1188	struct netdev_private *np = netdev_priv(dev);
1189	void __iomem *ioaddr = np->base;
1190	int hw_frame_id;
1191	int tx_cnt;
1192	int tx_status;
1193	int handled = 0;
1194	int i;
1195
1196
1197	do {
1198		int intr_status = ioread16(ioaddr + IntrStatus);
1199		iowrite16(intr_status, ioaddr + IntrStatus);
1200
1201		if (netif_msg_intr(np))
1202			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1203				   dev->name, intr_status);
1204
1205		if (!(intr_status & DEFAULT_INTR))
1206			break;
1207
1208		handled = 1;
1209
1210		if (intr_status & (IntrRxDMADone)) {
1211			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1212					ioaddr + IntrEnable);
1213			if (np->budget < 0)
1214				np->budget = RX_BUDGET;
1215			tasklet_schedule(&np->rx_tasklet);
1216		}
1217		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1218			tx_status = ioread16 (ioaddr + TxStatus);
1219			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1220				if (netif_msg_tx_done(np))
1221					printk
1222					    ("%s: Transmit status is %2.2x.\n",
1223				     	dev->name, tx_status);
1224				if (tx_status & 0x1e) {
1225					if (netif_msg_tx_err(np))
1226						printk("%s: Transmit error status %4.4x.\n",
1227							   dev->name, tx_status);
1228					dev->stats.tx_errors++;
1229					if (tx_status & 0x10)
1230						dev->stats.tx_fifo_errors++;
1231					if (tx_status & 0x08)
1232						dev->stats.collisions++;
1233					if (tx_status & 0x04)
1234						dev->stats.tx_fifo_errors++;
1235					if (tx_status & 0x02)
1236						dev->stats.tx_window_errors++;
1237
1238					/*
1239					** This reset has been verified on
1240					** DFE-580TX boards ! phdm@macqel.be.
1241					*/
1242					if (tx_status & 0x10) {	/* TxUnderrun */
1243						/* Restart Tx FIFO and transmitter */
1244						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1245						/* No need to reset the Tx pointer here */
1246					}
1247					/* Restart the Tx. Need to make sure tx enabled */
1248					i = 10;
1249					do {
1250						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1251						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1252							break;
1253						mdelay(1);
1254					} while (--i);
1255				}
1256				/* Yup, this is a documentation bug.  It cost me *hours*. */
1257				iowrite16 (0, ioaddr + TxStatus);
1258				if (tx_cnt < 0) {
1259					iowrite32(5000, ioaddr + DownCounter);
1260					break;
1261				}
1262				tx_status = ioread16 (ioaddr + TxStatus);
1263			}
1264			hw_frame_id = (tx_status >> 8) & 0xff;
1265		} else {
1266			hw_frame_id = ioread8(ioaddr + TxFrameId);
1267		}
1268
1269		if (np->pci_dev->revision >= 0x14) {
1270			spin_lock(&np->lock);
1271			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1272				int entry = np->dirty_tx % TX_RING_SIZE;
1273				struct sk_buff *skb;
1274				int sw_frame_id;
1275				sw_frame_id = (le32_to_cpu(
1276					np->tx_ring[entry].status) >> 2) & 0xff;
1277				if (sw_frame_id == hw_frame_id &&
1278					!(le32_to_cpu(np->tx_ring[entry].status)
1279					& 0x00010000))
1280						break;
1281				if (sw_frame_id == (hw_frame_id + 1) %
1282					TX_RING_SIZE)
1283						break;
1284				skb = np->tx_skbuff[entry];
1285				/* Free the original skb. */
1286				dma_unmap_single(&np->pci_dev->dev,
1287					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1288					skb->len, DMA_TO_DEVICE);
1289				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1290				np->tx_skbuff[entry] = NULL;
1291				np->tx_ring[entry].frag[0].addr = 0;
1292				np->tx_ring[entry].frag[0].length = 0;
1293			}
1294			spin_unlock(&np->lock);
1295		} else {
1296			spin_lock(&np->lock);
1297			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1298				int entry = np->dirty_tx % TX_RING_SIZE;
1299				struct sk_buff *skb;
1300				if (!(le32_to_cpu(np->tx_ring[entry].status)
1301							& 0x00010000))
1302					break;
1303				skb = np->tx_skbuff[entry];
1304				/* Free the original skb. */
1305				dma_unmap_single(&np->pci_dev->dev,
1306					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1307					skb->len, DMA_TO_DEVICE);
1308				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1309				np->tx_skbuff[entry] = NULL;
1310				np->tx_ring[entry].frag[0].addr = 0;
1311				np->tx_ring[entry].frag[0].length = 0;
1312			}
1313			spin_unlock(&np->lock);
1314		}
1315
1316		if (netif_queue_stopped(dev) &&
1317			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1318			/* The ring is no longer full, clear busy flag. */
1319			netif_wake_queue (dev);
1320		}
1321		/* Abnormal error summary/uncommon events handlers. */
1322		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1323			netdev_error(dev, intr_status);
1324	} while (0);
1325	if (netif_msg_intr(np))
1326		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1327			   dev->name, ioread16(ioaddr + IntrStatus));
1328	return IRQ_RETVAL(handled);
1329}
1330
1331static void rx_poll(unsigned long data)
1332{
1333	struct net_device *dev = (struct net_device *)data;
1334	struct netdev_private *np = netdev_priv(dev);
1335	int entry = np->cur_rx % RX_RING_SIZE;
1336	int boguscnt = np->budget;
1337	void __iomem *ioaddr = np->base;
1338	int received = 0;
1339
1340	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1341	while (1) {
1342		struct netdev_desc *desc = &(np->rx_ring[entry]);
1343		u32 frame_status = le32_to_cpu(desc->status);
1344		int pkt_len;
1345
1346		if (--boguscnt < 0) {
1347			goto not_done;
1348		}
1349		if (!(frame_status & DescOwn))
1350			break;
1351		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1352		if (netif_msg_rx_status(np))
1353			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1354				   frame_status);
1355		if (frame_status & 0x001f4000) {
1356			/* There was an error. */
1357			if (netif_msg_rx_err(np))
1358				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1359					   frame_status);
1360			dev->stats.rx_errors++;
1361			if (frame_status & 0x00100000)
1362				dev->stats.rx_length_errors++;
1363			if (frame_status & 0x00010000)
1364				dev->stats.rx_fifo_errors++;
1365			if (frame_status & 0x00060000)
1366				dev->stats.rx_frame_errors++;
1367			if (frame_status & 0x00080000)
1368				dev->stats.rx_crc_errors++;
1369			if (frame_status & 0x00100000) {
1370				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1371					   " status %8.8x.\n",
1372					   dev->name, frame_status);
1373			}
1374		} else {
1375			struct sk_buff *skb;
1376#ifndef final_version
1377			if (netif_msg_rx_status(np))
1378				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1379					   ", bogus_cnt %d.\n",
1380					   pkt_len, boguscnt);
1381#endif
1382			/* Check if the packet is long enough to accept without copying
1383			   to a minimally-sized skbuff. */
1384			if (pkt_len < rx_copybreak &&
1385			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1386				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1387				dma_sync_single_for_cpu(&np->pci_dev->dev,
1388						le32_to_cpu(desc->frag[0].addr),
1389						np->rx_buf_sz, DMA_FROM_DEVICE);
1390				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1391				dma_sync_single_for_device(&np->pci_dev->dev,
1392						le32_to_cpu(desc->frag[0].addr),
1393						np->rx_buf_sz, DMA_FROM_DEVICE);
1394				skb_put(skb, pkt_len);
1395			} else {
1396				dma_unmap_single(&np->pci_dev->dev,
1397					le32_to_cpu(desc->frag[0].addr),
1398					np->rx_buf_sz, DMA_FROM_DEVICE);
1399				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1400				np->rx_skbuff[entry] = NULL;
1401			}
1402			skb->protocol = eth_type_trans(skb, dev);
1403			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1404			netif_rx(skb);
1405		}
1406		entry = (entry + 1) % RX_RING_SIZE;
1407		received++;
1408	}
1409	np->cur_rx = entry;
1410	refill_rx (dev);
1411	np->budget -= received;
1412	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1413	return;
1414
1415not_done:
1416	np->cur_rx = entry;
1417	refill_rx (dev);
1418	if (!received)
1419		received = 1;
1420	np->budget -= received;
1421	if (np->budget <= 0)
1422		np->budget = RX_BUDGET;
1423	tasklet_schedule(&np->rx_tasklet);
1424}
1425
1426static void refill_rx (struct net_device *dev)
1427{
1428	struct netdev_private *np = netdev_priv(dev);
1429	int entry;
1430	int cnt = 0;
1431
1432	/* Refill the Rx ring buffers. */
1433	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1434		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1435		struct sk_buff *skb;
1436		entry = np->dirty_rx % RX_RING_SIZE;
1437		if (np->rx_skbuff[entry] == NULL) {
1438			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1439			np->rx_skbuff[entry] = skb;
1440			if (skb == NULL)
1441				break;		/* Better luck next round. */
1442			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1443			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1444				dma_map_single(&np->pci_dev->dev, skb->data,
1445					np->rx_buf_sz, DMA_FROM_DEVICE));
1446			if (dma_mapping_error(&np->pci_dev->dev,
1447				    np->rx_ring[entry].frag[0].addr)) {
1448			    dev_kfree_skb_irq(skb);
1449			    np->rx_skbuff[entry] = NULL;
1450			    break;
1451			}
1452		}
1453		/* Perhaps we need not reset this field. */
1454		np->rx_ring[entry].frag[0].length =
1455			cpu_to_le32(np->rx_buf_sz | LastFrag);
1456		np->rx_ring[entry].status = 0;
1457		cnt++;
1458	}
1459}
1460static void netdev_error(struct net_device *dev, int intr_status)
1461{
1462	struct netdev_private *np = netdev_priv(dev);
1463	void __iomem *ioaddr = np->base;
1464	u16 mii_ctl, mii_advertise, mii_lpa;
1465	int speed;
1466
1467	if (intr_status & LinkChange) {
1468		if (mdio_wait_link(dev, 10) == 0) {
1469			printk(KERN_INFO "%s: Link up\n", dev->name);
1470			if (np->an_enable) {
1471				mii_advertise = mdio_read(dev, np->phys[0],
1472							   MII_ADVERTISE);
1473				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1474				mii_advertise &= mii_lpa;
1475				printk(KERN_INFO "%s: Link changed: ",
1476					dev->name);
1477				if (mii_advertise & ADVERTISE_100FULL) {
1478					np->speed = 100;
1479					printk("100Mbps, full duplex\n");
1480				} else if (mii_advertise & ADVERTISE_100HALF) {
1481					np->speed = 100;
1482					printk("100Mbps, half duplex\n");
1483				} else if (mii_advertise & ADVERTISE_10FULL) {
1484					np->speed = 10;
1485					printk("10Mbps, full duplex\n");
1486				} else if (mii_advertise & ADVERTISE_10HALF) {
1487					np->speed = 10;
1488					printk("10Mbps, half duplex\n");
1489				} else
1490					printk("\n");
1491
1492			} else {
1493				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1494				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1495				np->speed = speed;
1496				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1497					dev->name, speed);
1498				printk("%s duplex.\n",
1499					(mii_ctl & BMCR_FULLDPLX) ?
1500						"full" : "half");
1501			}
1502			check_duplex(dev);
1503			if (np->flowctrl && np->mii_if.full_duplex) {
1504				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1505					ioaddr + MulticastFilter1+2);
1506				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1507					ioaddr + MACCtrl0);
1508			}
1509			netif_carrier_on(dev);
1510		} else {
1511			printk(KERN_INFO "%s: Link down\n", dev->name);
1512			netif_carrier_off(dev);
1513		}
1514	}
1515	if (intr_status & StatsMax) {
1516		get_stats(dev);
1517	}
1518	if (intr_status & IntrPCIErr) {
1519		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1520			   dev->name, intr_status);
1521		/* We must do a global reset of DMA to continue. */
1522	}
1523}
1524
1525static struct net_device_stats *get_stats(struct net_device *dev)
1526{
1527	struct netdev_private *np = netdev_priv(dev);
1528	void __iomem *ioaddr = np->base;
1529	unsigned long flags;
1530	u8 late_coll, single_coll, mult_coll;
1531
1532	spin_lock_irqsave(&np->statlock, flags);
1533	/* The chip only needs to report frames it silently dropped. */
1534	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1535	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1536	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1537	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1538
1539	mult_coll = ioread8(ioaddr + StatsMultiColl);
1540	np->xstats.tx_multiple_collisions += mult_coll;
1541	single_coll = ioread8(ioaddr + StatsOneColl);
1542	np->xstats.tx_single_collisions += single_coll;
1543	late_coll = ioread8(ioaddr + StatsLateColl);
1544	np->xstats.tx_late_collisions += late_coll;
1545	dev->stats.collisions += mult_coll
1546		+ single_coll
1547		+ late_coll;
1548
1549	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1550	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1551	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1552	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1553	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1554	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1555	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1556
1557	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1558	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1559	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1560	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1561
1562	spin_unlock_irqrestore(&np->statlock, flags);
1563
1564	return &dev->stats;
1565}
1566
1567static void set_rx_mode(struct net_device *dev)
1568{
1569	struct netdev_private *np = netdev_priv(dev);
1570	void __iomem *ioaddr = np->base;
1571	u16 mc_filter[4];			/* Multicast hash filter */
1572	u32 rx_mode;
1573	int i;
1574
1575	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1576		memset(mc_filter, 0xff, sizeof(mc_filter));
1577		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1578	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1579		   (dev->flags & IFF_ALLMULTI)) {
1580		/* Too many to match, or accept all multicasts. */
1581		memset(mc_filter, 0xff, sizeof(mc_filter));
1582		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1583	} else if (!netdev_mc_empty(dev)) {
1584		struct netdev_hw_addr *ha;
1585		int bit;
1586		int index;
1587		int crc;
1588		memset (mc_filter, 0, sizeof (mc_filter));
1589		netdev_for_each_mc_addr(ha, dev) {
1590			crc = ether_crc_le(ETH_ALEN, ha->addr);
1591			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1592				if (crc & 0x80000000) index |= 1 << bit;
1593			mc_filter[index/16] |= (1 << (index % 16));
1594		}
1595		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1596	} else {
1597		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1598		return;
1599	}
1600	if (np->mii_if.full_duplex && np->flowctrl)
1601		mc_filter[3] |= 0x0200;
1602
1603	for (i = 0; i < 4; i++)
1604		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1605	iowrite8(rx_mode, ioaddr + RxMode);
1606}
1607
1608static int __set_mac_addr(struct net_device *dev)
1609{
1610	struct netdev_private *np = netdev_priv(dev);
1611	u16 addr16;
1612
1613	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1614	iowrite16(addr16, np->base + StationAddr);
1615	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1616	iowrite16(addr16, np->base + StationAddr+2);
1617	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1618	iowrite16(addr16, np->base + StationAddr+4);
1619	return 0;
1620}
1621
1622/* Invoked with rtnl_lock held */
1623static int sundance_set_mac_addr(struct net_device *dev, void *data)
1624{
1625	const struct sockaddr *addr = data;
1626
1627	if (!is_valid_ether_addr(addr->sa_data))
1628		return -EADDRNOTAVAIL;
1629	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1630	__set_mac_addr(dev);
1631
1632	return 0;
1633}
1634
1635static const struct {
1636	const char name[ETH_GSTRING_LEN];
1637} sundance_stats[] = {
1638	{ "tx_multiple_collisions" },
1639	{ "tx_single_collisions" },
1640	{ "tx_late_collisions" },
1641	{ "tx_deferred" },
1642	{ "tx_deferred_excessive" },
1643	{ "tx_aborted" },
1644	{ "tx_bcasts" },
1645	{ "rx_bcasts" },
1646	{ "tx_mcasts" },
1647	{ "rx_mcasts" },
1648};
1649
1650static int check_if_running(struct net_device *dev)
1651{
1652	if (!netif_running(dev))
1653		return -EINVAL;
1654	return 0;
1655}
1656
1657static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1658{
1659	struct netdev_private *np = netdev_priv(dev);
1660	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1661	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1662	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1663}
1664
1665static int get_link_ksettings(struct net_device *dev,
1666			      struct ethtool_link_ksettings *cmd)
1667{
1668	struct netdev_private *np = netdev_priv(dev);
1669	spin_lock_irq(&np->lock);
1670	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1671	spin_unlock_irq(&np->lock);
1672	return 0;
1673}
1674
1675static int set_link_ksettings(struct net_device *dev,
1676			      const struct ethtool_link_ksettings *cmd)
1677{
1678	struct netdev_private *np = netdev_priv(dev);
1679	int res;
1680	spin_lock_irq(&np->lock);
1681	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1682	spin_unlock_irq(&np->lock);
1683	return res;
1684}
1685
1686static int nway_reset(struct net_device *dev)
1687{
1688	struct netdev_private *np = netdev_priv(dev);
1689	return mii_nway_restart(&np->mii_if);
1690}
1691
1692static u32 get_link(struct net_device *dev)
1693{
1694	struct netdev_private *np = netdev_priv(dev);
1695	return mii_link_ok(&np->mii_if);
1696}
1697
1698static u32 get_msglevel(struct net_device *dev)
1699{
1700	struct netdev_private *np = netdev_priv(dev);
1701	return np->msg_enable;
1702}
1703
1704static void set_msglevel(struct net_device *dev, u32 val)
1705{
1706	struct netdev_private *np = netdev_priv(dev);
1707	np->msg_enable = val;
1708}
1709
1710static void get_strings(struct net_device *dev, u32 stringset,
1711		u8 *data)
1712{
1713	if (stringset == ETH_SS_STATS)
1714		memcpy(data, sundance_stats, sizeof(sundance_stats));
1715}
1716
1717static int get_sset_count(struct net_device *dev, int sset)
1718{
1719	switch (sset) {
1720	case ETH_SS_STATS:
1721		return ARRAY_SIZE(sundance_stats);
1722	default:
1723		return -EOPNOTSUPP;
1724	}
1725}
1726
1727static void get_ethtool_stats(struct net_device *dev,
1728		struct ethtool_stats *stats, u64 *data)
1729{
1730	struct netdev_private *np = netdev_priv(dev);
1731	int i = 0;
1732
1733	get_stats(dev);
1734	data[i++] = np->xstats.tx_multiple_collisions;
1735	data[i++] = np->xstats.tx_single_collisions;
1736	data[i++] = np->xstats.tx_late_collisions;
1737	data[i++] = np->xstats.tx_deferred;
1738	data[i++] = np->xstats.tx_deferred_excessive;
1739	data[i++] = np->xstats.tx_aborted;
1740	data[i++] = np->xstats.tx_bcasts;
1741	data[i++] = np->xstats.rx_bcasts;
1742	data[i++] = np->xstats.tx_mcasts;
1743	data[i++] = np->xstats.rx_mcasts;
1744}
1745
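/*
 * These extra counters surface through the standard ethtool statistics
 * interface: `ethtool -S <iface>` pairs each name in sundance_stats[]
 * with the value filled in by get_ethtool_stats() above, in array order,
 * so the two lists must stay in sync.
 */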
1746#ifdef CONFIG_PM
1747
1748static void sundance_get_wol(struct net_device *dev,
1749		struct ethtool_wolinfo *wol)
1750{
1751	struct netdev_private *np = netdev_priv(dev);
1752	void __iomem *ioaddr = np->base;
1753	u8 wol_bits;
1754
1755	wol->wolopts = 0;
1756
1757	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1758	if (!np->wol_enabled)
1759		return;
1760
1761	wol_bits = ioread8(ioaddr + WakeEvent);
1762	if (wol_bits & MagicPktEnable)
1763		wol->wolopts |= WAKE_MAGIC;
1764	if (wol_bits & LinkEventEnable)
1765		wol->wolopts |= WAKE_PHY;
1766}
1767
1768static int sundance_set_wol(struct net_device *dev,
1769	struct ethtool_wolinfo *wol)
1770{
1771	struct netdev_private *np = netdev_priv(dev);
1772	void __iomem *ioaddr = np->base;
1773	u8 wol_bits;
1774
1775	if (!device_can_wakeup(&np->pci_dev->dev))
1776		return -EOPNOTSUPP;
1777
1778	np->wol_enabled = !!(wol->wolopts);
1779	wol_bits = ioread8(ioaddr + WakeEvent);
1780	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1781			LinkEventEnable | WolEnable);
1782
1783	if (np->wol_enabled) {
1784		if (wol->wolopts & WAKE_MAGIC)
1785			wol_bits |= (MagicPktEnable | WolEnable);
1786		if (wol->wolopts & WAKE_PHY)
1787			wol_bits |= (LinkEventEnable | WolEnable);
1788	}
1789	iowrite8(wol_bits, ioaddr + WakeEvent);
1790
1791	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1792
1793	return 0;
1794}
1795#else
1796#define sundance_get_wol NULL
1797#define sundance_set_wol NULL
1798#endif /* CONFIG_PM */
1799
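/*
 * Usage note (illustrative): with CONFIG_PM the WoL configuration is
 * driven from userspace through ethtool, e.g. `ethtool -s eth0 wol g`
 * to arm magic-packet wake or `ethtool -s eth0 wol p` for link-change
 * wake, matching the WAKE_MAGIC/WAKE_PHY bits handled above.  "eth0"
 * is only an example interface name.
 */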
1800static const struct ethtool_ops ethtool_ops = {
1801	.begin = check_if_running,
1802	.get_drvinfo = get_drvinfo,
1803	.nway_reset = nway_reset,
1804	.get_link = get_link,
1805	.get_wol = sundance_get_wol,
1806	.set_wol = sundance_set_wol,
1807	.get_msglevel = get_msglevel,
1808	.set_msglevel = set_msglevel,
1809	.get_strings = get_strings,
1810	.get_sset_count = get_sset_count,
1811	.get_ethtool_stats = get_ethtool_stats,
1812	.get_link_ksettings = get_link_ksettings,
1813	.set_link_ksettings = set_link_ksettings,
1814};
1815
1816static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1817{
1818	struct netdev_private *np = netdev_priv(dev);
1819	int rc;
1820
1821	if (!netif_running(dev))
1822		return -EINVAL;
1823
1824	spin_lock_irq(&np->lock);
1825	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1826	spin_unlock_irq(&np->lock);
1827
1828	return rc;
1829}
1830
1831static int netdev_close(struct net_device *dev)
1832{
1833	struct netdev_private *np = netdev_priv(dev);
1834	void __iomem *ioaddr = np->base;
1835	struct sk_buff *skb;
1836	int i;
1837
1838	/* Wait for and kill the tasklets */
1839	tasklet_kill(&np->rx_tasklet);
1840	tasklet_kill(&np->tx_tasklet);
1841	np->cur_tx = 0;
1842	np->dirty_tx = 0;
1843	np->cur_task = 0;
1844	np->last_tx = NULL;
1845
1846	netif_stop_queue(dev);
1847
1848	if (netif_msg_ifdown(np)) {
1849		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1850			   "Rx %4.4x Int %2.2x.\n",
1851			   dev->name, ioread8(ioaddr + TxStatus),
1852			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1853		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1854			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1855	}
1856
1857	/* Disable interrupts by clearing the interrupt mask. */
1858	iowrite16(0x0000, ioaddr + IntrEnable);
1859
1860	/* Disable Rx and Tx DMA so resources can be released safely */
1861	iowrite32(0x500, ioaddr + DMACtrl);
1862
1863	/* Stop the chip's Tx and Rx processes. */
1864	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1865
1866	for (i = 2000; i > 0; i--) {
1867		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1868			break;
1869		mdelay(1);
1870	}
1871
1872	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1873			ioaddr + ASIC_HI_WORD(ASICCtrl));
1874
1875	for (i = 2000; i > 0; i--) {
1876		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1877			break;
1878		mdelay(1);
1879	}
1880
1881#ifdef __i386__
1882	if (netif_msg_hw(np)) {
1883		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1884			   (int)(np->tx_ring_dma));
1885		for (i = 0; i < TX_RING_SIZE; i++)
1886			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1887				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1888				   np->tx_ring[i].frag[0].length);
1889		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1890			   (int)(np->rx_ring_dma));
1891		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1892			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1893				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1894				   np->rx_ring[i].frag[0].length);
1895		}
1896	}
1897#endif /* __i386__ debugging only */
1898
1899	free_irq(np->pci_dev->irq, dev);
1900
1901	del_timer_sync(&np->timer);
1902
1903	/* Free all the skbuffs in the Rx queue. */
1904	for (i = 0; i < RX_RING_SIZE; i++) {
1905		np->rx_ring[i].status = 0;
1906		skb = np->rx_skbuff[i];
1907		if (skb) {
1908			dma_unmap_single(&np->pci_dev->dev,
1909				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1910				np->rx_buf_sz, DMA_FROM_DEVICE);
1911			dev_kfree_skb(skb);
1912			np->rx_skbuff[i] = NULL;
1913		}
1914		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1915	}
1916	for (i = 0; i < TX_RING_SIZE; i++) {
1917		np->tx_ring[i].next_desc = 0;
1918		skb = np->tx_skbuff[i];
1919		if (skb) {
1920			dma_unmap_single(&np->pci_dev->dev,
1921				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1922				skb->len, DMA_TO_DEVICE);
1923			dev_kfree_skb(skb);
1924			np->tx_skbuff[i] = NULL;
1925		}
1926	}
1927
1928	return 0;
1929}
1930
1931static void sundance_remove1(struct pci_dev *pdev)
1932{
1933	struct net_device *dev = pci_get_drvdata(pdev);
1934
1935	if (dev) {
1936	    struct netdev_private *np = netdev_priv(dev);
1937	    unregister_netdev(dev);
1938	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1939		    np->rx_ring, np->rx_ring_dma);
1940	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1941		    np->tx_ring, np->tx_ring_dma);
1942	    pci_iounmap(pdev, np->base);
1943	    pci_release_regions(pdev);
1944	    free_netdev(dev);
1945	}
1946}
1947
1948#ifdef CONFIG_PM
1949
1950static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1951{
1952	struct net_device *dev = pci_get_drvdata(pci_dev);
1953	struct netdev_private *np = netdev_priv(dev);
1954	void __iomem *ioaddr = np->base;
1955
1956	if (!netif_running(dev))
1957		return 0;
1958
1959	netdev_close(dev);
1960	netif_device_detach(dev);
1961
1962	pci_save_state(pci_dev);
1963	if (np->wol_enabled) {
1964		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1965		iowrite16(RxEnable, ioaddr + MACCtrl1);
1966	}
1967	pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1968			np->wol_enabled);
1969	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1970
1971	return 0;
1972}
1973
1974static int sundance_resume(struct pci_dev *pci_dev)
1975{
1976	struct net_device *dev = pci_get_drvdata(pci_dev);
1977	int err = 0;
1978
1979	if (!netif_running(dev))
1980		return 0;
1981
1982	pci_set_power_state(pci_dev, PCI_D0);
1983	pci_restore_state(pci_dev);
1984	pci_enable_wake(pci_dev, PCI_D0, 0);
1985
1986	err = netdev_open(dev);
1987	if (err) {
1988		printk(KERN_ERR "%s: Can't resume interface!\n",
1989				dev->name);
1990		goto out;
1991	}
1992
1993	netif_device_attach(dev);
1994
1995out:
1996	return err;
1997}
1998
1999#endif /* CONFIG_PM */
2000
2001static struct pci_driver sundance_driver = {
2002	.name		= DRV_NAME,
2003	.id_table	= sundance_pci_tbl,
2004	.probe		= sundance_probe1,
2005	.remove		= sundance_remove1,
2006#ifdef CONFIG_PM
2007	.suspend	= sundance_suspend,
2008	.resume		= sundance_resume,
2009#endif /* CONFIG_PM */
2010};
2011
2012static int __init sundance_init(void)
2013{
2014/* when a module, this is printed whether or not devices are found in probe */
2015#ifdef MODULE
2016	printk(version);
2017#endif
2018	return pci_register_driver(&sundance_driver);
2019}
2020
2021static void __exit sundance_exit(void)
2022{
2023	pci_unregister_driver(&sundance_driver);
2024}
2025
2026module_init(sundance_init);
2027module_exit(sundance_exit);
2028
2029
v5.9
   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	https://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26
  27/* The user-configurable values.
  28   These may be modified when a driver module is loaded.*/
  29static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  30/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  31   Typical is a 64 element hash table based on the Ethernet CRC.  */
  32static const int multicast_filter_limit = 32;
  33
  34/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  35   Setting to > 1518 effectively disables this feature.
  36   This chip can receive into offset buffers, so the Alpha does not
  37   need a copy-align. */
  38static int rx_copybreak;
  39static int flowctrl=1;
  40
  41/* media[] specifies the media type the NIC operates at.
  42		 autosense	Autosensing active media.
  43		 10mbps_hd 	10Mbps half duplex.
  44		 10mbps_fd 	10Mbps full duplex.
  45		 100mbps_hd 	100Mbps half duplex.
  46		 100mbps_fd 	100Mbps full duplex.
  47		 0		Autosensing active media.
  48		 1	 	10Mbps half duplex.
  49		 2	 	10Mbps full duplex.
  50		 3	 	100Mbps half duplex.
  51		 4	 	100Mbps full duplex.
  52*/
  53#define MAX_UNITS 8
  54static char *media[MAX_UNITS];
  55
  56
  57/* Operational parameters that are set at compile time. */
  58
  59/* Keep the ring sizes a power of two for compile efficiency.
  60   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  61   Making the Tx ring too large decreases the effectiveness of channel
  62   bonding and packet priority, and more than 128 requires modifying the
  63   Tx error recovery.
  64   Large receive rings merely waste memory. */
  65#define TX_RING_SIZE	32
  66#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  67#define RX_RING_SIZE	64
  68#define RX_BUDGET	32
  69#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
  70#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
  71
  72/* Operational parameters that usually are not changed. */
  73/* Time in jiffies before concluding the transmitter is hung. */
  74#define TX_TIMEOUT  (4*HZ)
  75#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  76
  77/* Include files, designed to support most kernel versions 2.0.0 and later. */
  78#include <linux/module.h>
  79#include <linux/kernel.h>
  80#include <linux/string.h>
  81#include <linux/timer.h>
  82#include <linux/errno.h>
  83#include <linux/ioport.h>
  84#include <linux/interrupt.h>
  85#include <linux/pci.h>
  86#include <linux/netdevice.h>
  87#include <linux/etherdevice.h>
  88#include <linux/skbuff.h>
  89#include <linux/init.h>
  90#include <linux/bitops.h>
  91#include <linux/uaccess.h>
  92#include <asm/processor.h>		/* Processor type for cache alignment. */
  93#include <asm/io.h>
  94#include <linux/delay.h>
  95#include <linux/spinlock.h>
  96#include <linux/dma-mapping.h>
  97#include <linux/crc32.h>
  98#include <linux/ethtool.h>
  99#include <linux/mii.h>
 100
 101MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 102MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 103MODULE_LICENSE("GPL");
 104
 105module_param(debug, int, 0);
 106module_param(rx_copybreak, int, 0);
 107module_param_array(media, charp, NULL, 0);
 108module_param(flowctrl, int, 0);
 109MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 110MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 111MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 112
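/*
 * Usage note (illustrative values): the parameters above are supplied at
 * module load time, for example
 *
 *	modprobe sundance debug=3 flowctrl=1 media=100mbps_fd,autosense
 *
 * media= accepts up to MAX_UNITS comma-separated entries, one per card,
 * using the names or numbers listed in the media[] comment above.
 */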
 113/*
 114				Theory of Operation
 115
 116I. Board Compatibility
 117
 118This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 119
 120II. Board-specific settings
 121
 122III. Driver operation
 123
 124IIIa. Ring buffers
 125
 126This driver uses two statically allocated fixed-size descriptor lists
 127formed into rings by a branch from the final descriptor to the beginning of
 128the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 129Some chips explicitly use only 2^N sized rings, while others use a
 130'next descriptor' pointer that the driver forms into rings.
 131
 132IIIb/c. Transmit/Receive Structure
 133
 134This driver uses a zero-copy receive and transmit scheme.
 135The driver allocates full frame size skbuffs for the Rx ring buffers at
 136open() time and passes the skb->data field to the chip as receive data
 137buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 138a fresh skbuff is allocated and the frame is copied to the new skbuff.
 139When the incoming frame is larger, the skbuff is passed directly up the
 140protocol stack.  Buffers consumed this way are replaced by newly allocated
 141skbuffs in a later phase of receives.
 142
 143The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 144using a full-sized skbuff for small frames vs. the copying costs of larger
 145frames.  New boards are typically used in generously configured machines
 146and the underfilled buffers have negligible impact compared to the benefit of
 147a single allocation size, so the default value of zero results in never
 148copying packets.  When copying is done, the cost is usually mitigated by using
 149a combined copy/checksum routine.  Copying also preloads the cache, which is
 150most useful with small frames.  (A short sketch of this scheme follows this comment block.)
 151
 152A subtle aspect of the operation is that the IP header at offset 14 in an
 153ethernet frame isn't longword aligned for further processing.
 154Unaligned buffers are permitted by the Sundance hardware, so
 155frames are received into the skbuff at an offset of "+2", 16-byte aligning
 156the IP header.
 157
 158IIId. Synchronization
 159
 160The driver runs as two independent, single-threaded flows of control.  One
 161is the send-packet routine, which enforces single-threaded use by the
 162dev->tbusy flag.  The other thread is the interrupt handler, which is single
 163threaded by the hardware and interrupt handling software.
 164
 165The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 166flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 167queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 168the 'lp->tx_full' flag.
 169
 170The interrupt handler has exclusive control over the Rx ring and records stats
 171from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 172empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 173clears both the tx_full and tbusy flags.
 174
 175IV. Notes
 176
 177IVb. References
 178
 179The Sundance ST201 datasheet, preliminary version.
 180The Kendin KS8723 datasheet, preliminary version.
 181The ICplus IP100 datasheet, preliminary version.
 182http://www.scyld.com/expert/100mbps.html
 183http://www.scyld.com/expert/NWay.html
 184
 185IVc. Errata
 186
 187*/
 188
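/*
 * A minimal sketch of the receive copy-break scheme described above (the
 * real code lives in rx_poll() below): frames shorter than rx_copybreak
 * are copied into a freshly allocated skb, reserving two bytes so the IP
 * header lands on a 16-byte boundary, while larger frames hand the
 * original ring skb straight up the stack.  The function name is
 * illustrative only.
 */
static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
					   struct sk_buff *ring_skb,
					   int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);			/* align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;				/* ring skb stays in the ring */
	}
	skb_put(ring_skb, pkt_len);			/* pass the ring skb up directly */
	return ring_skb;
}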
 189/* Work-around for Kendin chip bugs. */
 190#ifndef CONFIG_SUNDANCE_MMIO
 191#define USE_IO_OPS 1
 192#endif
 193
 194static const struct pci_device_id sundance_pci_tbl[] = {
 195	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 196	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 197	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 198	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 199	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 200	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 201	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 202	{ }
 203};
 204MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 205
 206enum {
 207	netdev_io_size = 128
 208};
 209
 210struct pci_id_info {
 211        const char *name;
 212};
 213static const struct pci_id_info pci_id_tbl[] = {
 214	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 215	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 216	{"D-Link DFE-580TX 4 port Server Adapter"},
 217	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 218	{"D-Link DL10050-based FAST Ethernet Adapter"},
 219	{"Sundance Technology Alta"},
 220	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 221	{ }	/* terminate list. */
 222};
 223
 224/* This driver was written to use PCI memory space; however, x86-oriented
 225   hardware often uses I/O space accesses. */
 226
 227/* Offsets to the device registers.
 228   Unlike software-only systems, device drivers interact with complex hardware.
 229   It's not useful to define symbolic names for every register bit in the
 230   device.  The name can only partially document the semantics and make
 231   the driver longer and more difficult to read.
 232   In general, only the important configuration values or bits changed
 233   multiple times should be defined symbolically.
 234*/
 235enum alta_offsets {
 236	DMACtrl = 0x00,
 237	TxListPtr = 0x04,
 238	TxDMABurstThresh = 0x08,
 239	TxDMAUrgentThresh = 0x09,
 240	TxDMAPollPeriod = 0x0a,
 241	RxDMAStatus = 0x0c,
 242	RxListPtr = 0x10,
 243	DebugCtrl0 = 0x1a,
 244	DebugCtrl1 = 0x1c,
 245	RxDMABurstThresh = 0x14,
 246	RxDMAUrgentThresh = 0x15,
 247	RxDMAPollPeriod = 0x16,
 248	LEDCtrl = 0x1a,
 249	ASICCtrl = 0x30,
 250	EEData = 0x34,
 251	EECtrl = 0x36,
 252	FlashAddr = 0x40,
 253	FlashData = 0x44,
 254	WakeEvent = 0x45,
 255	TxStatus = 0x46,
 256	TxFrameId = 0x47,
 257	DownCounter = 0x18,
 258	IntrClear = 0x4a,
 259	IntrEnable = 0x4c,
 260	IntrStatus = 0x4e,
 261	MACCtrl0 = 0x50,
 262	MACCtrl1 = 0x52,
 263	StationAddr = 0x54,
 264	MaxFrameSize = 0x5A,
 265	RxMode = 0x5c,
 266	MIICtrl = 0x5e,
 267	MulticastFilter0 = 0x60,
 268	MulticastFilter1 = 0x64,
 269	RxOctetsLow = 0x68,
 270	RxOctetsHigh = 0x6a,
 271	TxOctetsLow = 0x6c,
 272	TxOctetsHigh = 0x6e,
 273	TxFramesOK = 0x70,
 274	RxFramesOK = 0x72,
 275	StatsCarrierError = 0x74,
 276	StatsLateColl = 0x75,
 277	StatsMultiColl = 0x76,
 278	StatsOneColl = 0x77,
 279	StatsTxDefer = 0x78,
 280	RxMissed = 0x79,
 281	StatsTxXSDefer = 0x7a,
 282	StatsTxAbort = 0x7b,
 283	StatsBcastTx = 0x7c,
 284	StatsBcastRx = 0x7d,
 285	StatsMcastTx = 0x7e,
 286	StatsMcastRx = 0x7f,
 287	/* Aliased and bogus values! */
 288	RxStatus = 0x0c,
 289};
 290
 291#define ASIC_HI_WORD(x)	((x) + 2)
 292
 293enum ASICCtrl_HiWord_bit {
 294	GlobalReset = 0x0001,
 295	RxReset = 0x0002,
 296	TxReset = 0x0004,
 297	DMAReset = 0x0008,
 298	FIFOReset = 0x0010,
 299	NetworkReset = 0x0020,
 300	HostReset = 0x0040,
 301	ResetBusy = 0x0400,
 302};
 303
 304/* Bits in the interrupt status/mask registers. */
 305enum intr_status_bits {
 306	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 307	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 308	IntrDrvRqst=0x0040,
 309	StatsMax=0x0080, LinkChange=0x0100,
 310	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 311};
 312
 313/* Bits in the RxMode register. */
 314enum rx_mode_bits {
 315	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 316	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 317};
 318/* Bits in MACCtrl. */
 319enum mac_ctrl0_bits {
 320	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 321	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 322};
 323enum mac_ctrl1_bits {
 324	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 325	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 326	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 327};
 328
 329/* Bits in WakeEvent register. */
 330enum wake_event_bits {
 331	WakePktEnable = 0x01,
 332	MagicPktEnable = 0x02,
 333	LinkEventEnable = 0x04,
 334	WolEnable = 0x80,
 335};
 336
 337/* The Rx and Tx buffer descriptors. */
 338/* Note that using only 32 bit fields simplifies conversion to big-endian
 339   architectures. */
 340struct netdev_desc {
 341	__le32 next_desc;
 342	__le32 status;
 343	struct desc_frag { __le32 addr, length; } frag[1];
 344};
 345
 346/* Bits in netdev_desc.status */
 347enum desc_status_bits {
 348	DescOwn=0x8000,
 349	DescEndPacket=0x4000,
 350	DescEndRing=0x2000,
 351	LastFrag=0x80000000,
 352	DescIntrOnTx=0x8000,
 353	DescIntrOnDMADone=0x80000000,
 354	DisableAlign = 0x00000001,
 355};
 356
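/*
 * A minimal sketch of how start_tx() (further below) fills a single-
 * fragment descriptor using the layout above: the frame id rides in
 * status bits 2..9, the 32-bit DMA address and length go into frag[0],
 * and LastFrag marks the only fragment.  The helper itself is
 * illustrative; the field usage mirrors the real code.
 */
static void fill_tx_desc_sketch(struct netdev_desc *txdesc, unsigned int entry,
				dma_addr_t addr, unsigned int len)
{
	txdesc->next_desc = 0;			/* chained later by tx_poll() */
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(addr);
	txdesc->frag[0].length = cpu_to_le32(len | LastFrag);
}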
 357#define PRIV_ALIGN	15 	/* Required alignment mask */
 358/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 359   within the structure. */
 360#define MII_CNT		4
 361struct netdev_private {
 362	/* Descriptor rings first for alignment. */
 363	struct netdev_desc *rx_ring;
 364	struct netdev_desc *tx_ring;
 365	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 366	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 367        dma_addr_t tx_ring_dma;
 368        dma_addr_t rx_ring_dma;
 369	struct timer_list timer;		/* Media monitoring timer. */
 370	/* ethtool extra stats */
 371	struct {
 372		u64 tx_multiple_collisions;
 373		u64 tx_single_collisions;
 374		u64 tx_late_collisions;
 375		u64 tx_deferred;
 376		u64 tx_deferred_excessive;
 377		u64 tx_aborted;
 378		u64 tx_bcasts;
 379		u64 rx_bcasts;
 380		u64 tx_mcasts;
 381		u64 rx_mcasts;
 382	} xstats;
 383	/* Frequently used values: keep some adjacent for cache effect. */
 384	spinlock_t lock;
 385	int msg_enable;
 386	int chip_id;
 387	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 388	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 389	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 390	unsigned int cur_tx, dirty_tx;
 391	/* These values keep track of the transceiver/media in use. */
 392	unsigned int flowctrl:1;
 393	unsigned int default_port:4;		/* Last dev->if_port value. */
 394	unsigned int an_enable:1;
 395	unsigned int speed;
 396	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
 397	struct tasklet_struct rx_tasklet;
 398	struct tasklet_struct tx_tasklet;
 399	int budget;
 400	int cur_task;
 401	/* Multicast and receive mode. */
 402	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 403	u16 mcast_filter[4];
 404	/* MII transceiver section. */
 405	struct mii_if_info mii_if;
 406	int mii_preamble_required;
 407	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 408	struct pci_dev *pci_dev;
 409	void __iomem *base;
 410	spinlock_t statlock;
 411};
 412
 413/* The station address location in the EEPROM. */
 414#define EEPROM_SA_OFFSET	0x10
 415#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 416			IntrDrvRqst | IntrTxDone | StatsMax | \
 417			LinkChange)
 418
 419static int  change_mtu(struct net_device *dev, int new_mtu);
 420static int  eeprom_read(void __iomem *ioaddr, int location);
 421static int  mdio_read(struct net_device *dev, int phy_id, int location);
 422static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 423static int  mdio_wait_link(struct net_device *dev, int wait);
 424static int  netdev_open(struct net_device *dev);
 425static void check_duplex(struct net_device *dev);
 426static void netdev_timer(struct timer_list *t);
 427static void tx_timeout(struct net_device *dev, unsigned int txqueue);
 428static void init_ring(struct net_device *dev);
 429static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 430static int reset_tx (struct net_device *dev);
 431static irqreturn_t intr_handler(int irq, void *dev_instance);
 432static void rx_poll(unsigned long data);
 433static void tx_poll(unsigned long data);
 434static void refill_rx (struct net_device *dev);
 435static void netdev_error(struct net_device *dev, int intr_status);
 436static void netdev_error(struct net_device *dev, int intr_status);
 437static void set_rx_mode(struct net_device *dev);
 438static int __set_mac_addr(struct net_device *dev);
 439static int sundance_set_mac_addr(struct net_device *dev, void *data);
 440static struct net_device_stats *get_stats(struct net_device *dev);
 441static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 442static int  netdev_close(struct net_device *dev);
 443static const struct ethtool_ops ethtool_ops;
 444
 445static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 446{
 447	struct netdev_private *np = netdev_priv(dev);
 448	void __iomem *ioaddr = np->base + ASICCtrl;
 449	int countdown;
 450
 451	/* ST201 documentation states ASICCtrl is a 32bit register */
 452	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 453	/* ST201 documentation states reset can take up to 1 ms */
 454	countdown = 10 + 1;
 455	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 456		if (--countdown == 0) {
 457			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 458			break;
 459		}
 460		udelay(100);
 461	}
 462}
 463
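/*
 * Callers pass the reset bits shifted into the high half of ASICCtrl;
 * reset_tx() below, for instance, issues
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | DMAReset | TxReset) << 16);
 *
 * which is also why the busy test above checks ResetBusy << 16.
 */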
 464#ifdef CONFIG_NET_POLL_CONTROLLER
 465static void sundance_poll_controller(struct net_device *dev)
 466{
 467	struct netdev_private *np = netdev_priv(dev);
 468
 469	disable_irq(np->pci_dev->irq);
 470	intr_handler(np->pci_dev->irq, dev);
 471	enable_irq(np->pci_dev->irq);
 472}
 473#endif
 474
 475static const struct net_device_ops netdev_ops = {
 476	.ndo_open		= netdev_open,
 477	.ndo_stop		= netdev_close,
 478	.ndo_start_xmit		= start_tx,
 479	.ndo_get_stats 		= get_stats,
 480	.ndo_set_rx_mode	= set_rx_mode,
 481	.ndo_do_ioctl 		= netdev_ioctl,
 482	.ndo_tx_timeout		= tx_timeout,
 483	.ndo_change_mtu		= change_mtu,
 484	.ndo_set_mac_address 	= sundance_set_mac_addr,
 485	.ndo_validate_addr	= eth_validate_addr,
 486#ifdef CONFIG_NET_POLL_CONTROLLER
 487	.ndo_poll_controller 	= sundance_poll_controller,
 488#endif
 489};
 490
 491static int sundance_probe1(struct pci_dev *pdev,
 492			   const struct pci_device_id *ent)
 493{
 494	struct net_device *dev;
 495	struct netdev_private *np;
 496	static int card_idx;
 497	int chip_idx = ent->driver_data;
 498	int irq;
 499	int i;
 500	void __iomem *ioaddr;
 501	u16 mii_ctl;
 502	void *ring_space;
 503	dma_addr_t ring_dma;
 504#ifdef USE_IO_OPS
 505	int bar = 0;
 506#else
 507	int bar = 1;
 508#endif
 509	int phy, phy_end, phy_idx = 0;
 510
 511	if (pci_enable_device(pdev))
 512		return -EIO;
 513	pci_set_master(pdev);
 514
 515	irq = pdev->irq;
 516
 517	dev = alloc_etherdev(sizeof(*np));
 518	if (!dev)
 519		return -ENOMEM;
 520	SET_NETDEV_DEV(dev, &pdev->dev);
 521
 522	if (pci_request_regions(pdev, DRV_NAME))
 523		goto err_out_netdev;
 524
 525	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 526	if (!ioaddr)
 527		goto err_out_res;
 528
 529	for (i = 0; i < 3; i++)
 530		((__le16 *)dev->dev_addr)[i] =
 531			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 532
 533	np = netdev_priv(dev);
 534	np->base = ioaddr;
 535	np->pci_dev = pdev;
 536	np->chip_id = chip_idx;
 537	np->msg_enable = (1 << debug) - 1;
 538	spin_lock_init(&np->lock);
 539	spin_lock_init(&np->statlock);
 540	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 541	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 542
 543	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 544			&ring_dma, GFP_KERNEL);
 545	if (!ring_space)
 546		goto err_out_cleardev;
 547	np->tx_ring = (struct netdev_desc *)ring_space;
 548	np->tx_ring_dma = ring_dma;
 549
 550	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 551			&ring_dma, GFP_KERNEL);
 552	if (!ring_space)
 553		goto err_out_unmap_tx;
 554	np->rx_ring = (struct netdev_desc *)ring_space;
 555	np->rx_ring_dma = ring_dma;
 556
 557	np->mii_if.dev = dev;
 558	np->mii_if.mdio_read = mdio_read;
 559	np->mii_if.mdio_write = mdio_write;
 560	np->mii_if.phy_id_mask = 0x1f;
 561	np->mii_if.reg_num_mask = 0x1f;
 562
 563	/* The chip-specific entries in the device structure. */
 564	dev->netdev_ops = &netdev_ops;
 565	dev->ethtool_ops = &ethtool_ops;
 566	dev->watchdog_timeo = TX_TIMEOUT;
 567
 568	/* MTU range: 68 - 8191 */
 569	dev->min_mtu = ETH_MIN_MTU;
 570	dev->max_mtu = 8191;
 571
 572	pci_set_drvdata(pdev, dev);
 573
 574	i = register_netdev(dev);
 575	if (i)
 576		goto err_out_unmap_rx;
 577
 578	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 579	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 580	       dev->dev_addr, irq);
 581
 582	np->phys[0] = 1;		/* Default setting */
 583	np->mii_preamble_required++;
 584
 585	/*
 586	 * It seems some PHYs don't deal well with address 0 being accessed
 587	 * first
 588	 */
 589	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 590		phy = 0;
 591		phy_end = 31;
 592	} else {
 593		phy = 1;
 594		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 595	}
 596	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 597		int phyx = phy & 0x1f;
 598		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 599		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 600			np->phys[phy_idx++] = phyx;
 601			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 602			if ((mii_status & 0x0040) == 0)
 603				np->mii_preamble_required++;
 604			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 605				   "0x%4.4x advertising %4.4x.\n",
 606				   dev->name, phyx, mii_status, np->mii_if.advertising);
 607		}
 608	}
 609	np->mii_preamble_required--;
 610
 611	if (phy_idx == 0) {
 612		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 613			   dev->name, ioread32(ioaddr + ASICCtrl));
 614		goto err_out_unregister;
 615	}
 616
 617	np->mii_if.phy_id = np->phys[0];
 618
 619	/* Parse override configuration */
 620	np->an_enable = 1;
 621	if (card_idx < MAX_UNITS) {
 622		if (media[card_idx] != NULL) {
 623			np->an_enable = 0;
 624			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 625			    strcmp (media[card_idx], "4") == 0) {
 626				np->speed = 100;
 627				np->mii_if.full_duplex = 1;
 628			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 629				   strcmp (media[card_idx], "3") == 0) {
 630				np->speed = 100;
 631				np->mii_if.full_duplex = 0;
 632			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 633				   strcmp (media[card_idx], "2") == 0) {
 634				np->speed = 10;
 635				np->mii_if.full_duplex = 1;
 636			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 637				   strcmp (media[card_idx], "1") == 0) {
 638				np->speed = 10;
 639				np->mii_if.full_duplex = 0;
 640			} else {
 641				np->an_enable = 1;
 642			}
 643		}
 644		if (flowctrl == 1)
 645			np->flowctrl = 1;
 646	}
 647
 648	/* Fibre PHY? */
 649	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 650		/* Default 100Mbps Full */
 651		if (np->an_enable) {
 652			np->speed = 100;
 653			np->mii_if.full_duplex = 1;
 654			np->an_enable = 0;
 655		}
 656	}
 657	/* Reset PHY */
 658	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 659	mdelay (300);
 660	/* If flow control enabled, we need to advertise it.*/
 661	if (np->flowctrl)
 662		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 663	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 664	/* Force media type */
 665	if (!np->an_enable) {
 666		mii_ctl = 0;
 667		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 668		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 669		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 670		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 671			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 672
 673	}
 674
 675	/* Perhaps move the reset here? */
 676	/* Reset the chip to erase previous misconfiguration. */
 677	if (netif_msg_hw(np))
 678		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 679	sundance_reset(dev, 0x00ff << 16);
 680	if (netif_msg_hw(np))
 681		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 682
 683	card_idx++;
 684	return 0;
 685
 686err_out_unregister:
 687	unregister_netdev(dev);
 688err_out_unmap_rx:
 689	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 690		np->rx_ring, np->rx_ring_dma);
 691err_out_unmap_tx:
 692	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 693		np->tx_ring, np->tx_ring_dma);
 694err_out_cleardev:
 695	pci_iounmap(pdev, ioaddr);
 696err_out_res:
 697	pci_release_regions(pdev);
 698err_out_netdev:
 699	free_netdev (dev);
 700	return -ENODEV;
 701}
 702
 703static int change_mtu(struct net_device *dev, int new_mtu)
 704{
 705	if (netif_running(dev))
 706		return -EBUSY;
 707	dev->mtu = new_mtu;
 708	return 0;
 709}
 710
 711#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 712/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 713static int eeprom_read(void __iomem *ioaddr, int location)
 714{
 715	int boguscnt = 10000;		/* Typical 1900 ticks. */
 716	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 717	do {
 718		eeprom_delay(ioaddr + EECtrl);
 719		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 720			return ioread16(ioaddr + EEData);
 721		}
 722	} while (--boguscnt > 0);
 723	return 0;
 724}
 725
 726/*  MII transceiver control section.
 727	Read and write the MII registers using software-generated serial
 728	MDIO protocol.  See the MII specifications or DP83840A data sheet
 729	for details.
 730
 731	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
 732	met by back-to-back 33 MHz PCI cycles. */
 733#define mdio_delay() ioread8(mdio_addr)
 734
 735enum mii_reg_bits {
 736	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 737};
 738#define MDIO_EnbIn  (0)
 739#define MDIO_WRITE0 (MDIO_EnbOutput)
 740#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 741
 742/* Generate the preamble required for initial synchronization and
 743   a few older transceivers. */
 744static void mdio_sync(void __iomem *mdio_addr)
 745{
 746	int bits = 32;
 747
 748	/* Establish sync by sending at least 32 logic ones. */
 749	while (--bits >= 0) {
 750		iowrite8(MDIO_WRITE1, mdio_addr);
 751		mdio_delay();
 752		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 753		mdio_delay();
 754	}
 755}
 756
 757static int mdio_read(struct net_device *dev, int phy_id, int location)
 758{
 759	struct netdev_private *np = netdev_priv(dev);
 760	void __iomem *mdio_addr = np->base + MIICtrl;
 761	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 762	int i, retval = 0;
 763
 764	if (np->mii_preamble_required)
 765		mdio_sync(mdio_addr);
 766
 767	/* Shift the read command bits out. */
 768	for (i = 15; i >= 0; i--) {
 769		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 770
 771		iowrite8(dataval, mdio_addr);
 772		mdio_delay();
 773		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 774		mdio_delay();
 775	}
 776	/* Read the two transition, 16 data, and wire-idle bits. */
 777	for (i = 19; i > 0; i--) {
 778		iowrite8(MDIO_EnbIn, mdio_addr);
 779		mdio_delay();
 780		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 781		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 782		mdio_delay();
 783	}
 784	return (retval>>1) & 0xffff;
 785}
 786
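/*
 * A small usage sketch (not in the driver): the bit-banged mdio_read()
 * above is enough to identify a transceiver by combining the two
 * standard ID registers from <linux/mii.h>.
 */
static u32 sundance_read_phy_id(struct net_device *dev, int phy)
{
	u32 id = (u32)mdio_read(dev, phy, MII_PHYSID1) << 16;

	return id | mdio_read(dev, phy, MII_PHYSID2);
}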
 787static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 788{
 789	struct netdev_private *np = netdev_priv(dev);
 790	void __iomem *mdio_addr = np->base + MIICtrl;
 791	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 792	int i;
 793
 794	if (np->mii_preamble_required)
 795		mdio_sync(mdio_addr);
 796
 797	/* Shift the command bits out. */
 798	for (i = 31; i >= 0; i--) {
 799		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 800
 801		iowrite8(dataval, mdio_addr);
 802		mdio_delay();
 803		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 804		mdio_delay();
 805	}
 806	/* Clear out extra bits. */
 807	for (i = 2; i > 0; i--) {
 808		iowrite8(MDIO_EnbIn, mdio_addr);
 809		mdio_delay();
 810		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 811		mdio_delay();
 812	}
 813}
 814
 815static int mdio_wait_link(struct net_device *dev, int wait)
 816{
 817	int bmsr;
 818	int phy_id;
 819	struct netdev_private *np;
 820
 821	np = netdev_priv(dev);
 822	phy_id = np->phys[0];
 823
 824	do {
 825		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 826		if (bmsr & 0x0004)
 827			return 0;
 828		mdelay(1);
 829	} while (--wait > 0);
 830	return -1;
 831}
 832
 833static int netdev_open(struct net_device *dev)
 834{
 835	struct netdev_private *np = netdev_priv(dev);
 836	void __iomem *ioaddr = np->base;
 837	const int irq = np->pci_dev->irq;
 838	unsigned long flags;
 839	int i;
 840
 841	sundance_reset(dev, 0x00ff << 16);
 842
 843	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 844	if (i)
 845		return i;
 846
 847	if (netif_msg_ifup(np))
 848		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 849
 850	init_ring(dev);
 851
 852	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 853	/* The Tx list pointer is written as packets are queued. */
 854
 855	/* Initialize other registers. */
 856	__set_mac_addr(dev);
 857#if IS_ENABLED(CONFIG_VLAN_8021Q)
 858	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 859#else
 860	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 861#endif
 862	if (dev->mtu > 2047)
 863		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 864
 865	/* Configure the PCI bus bursts and FIFO thresholds. */
 866
 867	if (dev->if_port == 0)
 868		dev->if_port = np->default_port;
 869
 870	spin_lock_init(&np->mcastlock);
 871
 872	set_rx_mode(dev);
 873	iowrite16(0, ioaddr + IntrEnable);
 874	iowrite16(0, ioaddr + DownCounter);
 875	/* Set the chip to poll every N*320nsec. */
 876	iowrite8(100, ioaddr + RxDMAPollPeriod);
 877	iowrite8(127, ioaddr + TxDMAPollPeriod);
 878	/* Fix DFE-580TX packet drop issue */
 879	if (np->pci_dev->revision >= 0x14)
 880		iowrite8(0x01, ioaddr + DebugCtrl1);
 881	netif_start_queue(dev);
 882
 883	spin_lock_irqsave(&np->lock, flags);
 884	reset_tx(dev);
 885	spin_unlock_irqrestore(&np->lock, flags);
 886
 887	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 888
 889	/* Disable Wol */
 890	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 891	np->wol_enabled = 0;
 892
 893	if (netif_msg_ifup(np))
 894		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 895			   "MAC Control %x, %4.4x %4.4x.\n",
 896			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 897			   ioread32(ioaddr + MACCtrl0),
 898			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 899
 900	/* Set the timer to check for link beat. */
 901	timer_setup(&np->timer, netdev_timer, 0);
 902	np->timer.expires = jiffies + 3*HZ;
 903	add_timer(&np->timer);
 904
 905	/* Enable interrupts by setting the interrupt mask. */
 906	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 907
 908	return 0;
 909}
 910
 911static void check_duplex(struct net_device *dev)
 912{
 913	struct netdev_private *np = netdev_priv(dev);
 914	void __iomem *ioaddr = np->base;
 915	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 916	int negotiated = mii_lpa & np->mii_if.advertising;
 917	int duplex;
 918
 919	/* Force media */
 920	if (!np->an_enable || mii_lpa == 0xffff) {
 921		if (np->mii_if.full_duplex)
 922			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 923				ioaddr + MACCtrl0);
 924		return;
 925	}
 926
 927	/* Autonegotiation */
 928	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 929	if (np->mii_if.full_duplex != duplex) {
 930		np->mii_if.full_duplex = duplex;
 931		if (netif_msg_link(np))
 932			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 933				   "negotiated capability %4.4x.\n", dev->name,
 934				   duplex ? "full" : "half", np->phys[0], negotiated);
 935		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 936	}
 937}
 938
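/*
 * The duplex decision above decodes the link-partner ability word with
 * raw masks; in <linux/mii.h> terms the same test reads (sketch only):
 *
 *	duplex = (negotiated & LPA_100FULL) ||
 *		 (negotiated & (LPA_100FULL | LPA_100HALF | LPA_10FULL)) == LPA_10FULL;
 *
 * i.e. full duplex when 100FULL is common, or when 10FULL is the only
 * common ability among the 10/100 full- and half-duplex bits checked.
 */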
 939static void netdev_timer(struct timer_list *t)
 940{
 941	struct netdev_private *np = from_timer(np, t, timer);
 942	struct net_device *dev = np->mii_if.dev;
 943	void __iomem *ioaddr = np->base;
 944	int next_tick = 10*HZ;
 945
 946	if (netif_msg_timer(np)) {
 947		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 948			   "Tx %x Rx %x.\n",
 949			   dev->name, ioread16(ioaddr + IntrEnable),
 950			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 951	}
 952	check_duplex(dev);
 953	np->timer.expires = jiffies + next_tick;
 954	add_timer(&np->timer);
 955}
 956
 957static void tx_timeout(struct net_device *dev, unsigned int txqueue)
 958{
 959	struct netdev_private *np = netdev_priv(dev);
 960	void __iomem *ioaddr = np->base;
 961	unsigned long flag;
 962
 963	netif_stop_queue(dev);
 964	tasklet_disable(&np->tx_tasklet);
 965	iowrite16(0, ioaddr + IntrEnable);
 966	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 967		   "TxFrameId %2.2x,"
 968		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 969		   ioread8(ioaddr + TxFrameId));
 970
 971	{
 972		int i;
 973		for (i=0; i<TX_RING_SIZE; i++) {
 974			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 975				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 976				le32_to_cpu(np->tx_ring[i].next_desc),
 977				le32_to_cpu(np->tx_ring[i].status),
 978				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 979				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 980				le32_to_cpu(np->tx_ring[i].frag[0].length));
 981		}
 982		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 983			ioread32(np->base + TxListPtr),
 984			netif_queue_stopped(dev));
 985		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 986			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 987			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 988		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
 989		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
 990	}
 991	spin_lock_irqsave(&np->lock, flag);
 992
 993	/* Stop and restart the chip's Tx processes. */
 994	reset_tx(dev);
 995	spin_unlock_irqrestore(&np->lock, flag);
 996
 997	dev->if_port = 0;
 998
 999	netif_trans_update(dev); /* prevent tx timeout */
1000	dev->stats.tx_errors++;
1001	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1002		netif_wake_queue(dev);
1003	}
1004	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1005	tasklet_enable(&np->tx_tasklet);
1006}
1007
1008
1009/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1010static void init_ring(struct net_device *dev)
1011{
1012	struct netdev_private *np = netdev_priv(dev);
1013	int i;
1014
1015	np->cur_rx = np->cur_tx = 0;
1016	np->dirty_rx = np->dirty_tx = 0;
1017	np->cur_task = 0;
1018
1019	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1020
1021	/* Initialize all Rx descriptors. */
1022	for (i = 0; i < RX_RING_SIZE; i++) {
1023		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1024			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1025		np->rx_ring[i].status = 0;
1026		np->rx_ring[i].frag[0].length = 0;
1027		np->rx_skbuff[i] = NULL;
1028	}
1029
1030	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1031	for (i = 0; i < RX_RING_SIZE; i++) {
1032		struct sk_buff *skb =
1033			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1034		np->rx_skbuff[i] = skb;
1035		if (skb == NULL)
1036			break;
1037		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1038		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1039			dma_map_single(&np->pci_dev->dev, skb->data,
1040				np->rx_buf_sz, DMA_FROM_DEVICE));
1041		if (dma_mapping_error(&np->pci_dev->dev,
1042					np->rx_ring[i].frag[0].addr)) {
1043			dev_kfree_skb(skb);
1044			np->rx_skbuff[i] = NULL;
1045			break;
1046		}
1047		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1048	}
1049	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1050
1051	for (i = 0; i < TX_RING_SIZE; i++) {
1052		np->tx_skbuff[i] = NULL;
1053		np->tx_ring[i].status = 0;
1054	}
1055}
1056
1057static void tx_poll (unsigned long data)
1058{
1059	struct net_device *dev = (struct net_device *)data;
1060	struct netdev_private *np = netdev_priv(dev);
1061	unsigned head = np->cur_task % TX_RING_SIZE;
1062	struct netdev_desc *txdesc =
1063		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1064
1065	/* Chain the next pointer */
1066	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1067		int entry = np->cur_task % TX_RING_SIZE;
1068		txdesc = &np->tx_ring[entry];
1069		if (np->last_tx) {
1070			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1071				entry*sizeof(struct netdev_desc));
1072		}
1073		np->last_tx = txdesc;
1074	}
1075	/* Mark the latest descriptor in the Tx ring */
1076	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1077
1078	if (ioread32 (np->base + TxListPtr) == 0)
1079		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1080			np->base + TxListPtr);
1081}
1082
1083static netdev_tx_t
1084start_tx (struct sk_buff *skb, struct net_device *dev)
1085{
1086	struct netdev_private *np = netdev_priv(dev);
1087	struct netdev_desc *txdesc;
1088	unsigned entry;
1089
1090	/* Calculate the next Tx descriptor entry. */
1091	entry = np->cur_tx % TX_RING_SIZE;
1092	np->tx_skbuff[entry] = skb;
1093	txdesc = &np->tx_ring[entry];
1094
1095	txdesc->next_desc = 0;
1096	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1097	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1098				skb->data, skb->len, DMA_TO_DEVICE));
1099	if (dma_mapping_error(&np->pci_dev->dev,
1100				txdesc->frag[0].addr))
1101			goto drop_frame;
1102	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1103
1104	/* Increment cur_tx before tasklet_schedule() */
1105	np->cur_tx++;
1106	mb();
1107	/* Schedule a tx_poll() task */
1108	tasklet_schedule(&np->tx_tasklet);
1109
1110	/* On some architectures: explicitly flush cache lines here. */
1111	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1112	    !netif_queue_stopped(dev)) {
1113		/* do nothing */
1114	} else {
1115		netif_stop_queue (dev);
1116	}
1117	if (netif_msg_tx_queued(np)) {
1118		printk (KERN_DEBUG
1119			"%s: Transmit frame #%d queued in slot %d.\n",
1120			dev->name, np->cur_tx, entry);
1121	}
1122	return NETDEV_TX_OK;
1123
1124drop_frame:
1125	dev_kfree_skb_any(skb);
1126	np->tx_skbuff[entry] = NULL;
1127	dev->stats.tx_dropped++;
1128	return NETDEV_TX_OK;
1129}
1130
1131/* Reset the hardware Tx path and free all Tx buffers */
1132static int
1133reset_tx (struct net_device *dev)
1134{
1135	struct netdev_private *np = netdev_priv(dev);
1136	void __iomem *ioaddr = np->base;
1137	struct sk_buff *skb;
1138	int i;
1139
1140	/* Reset tx logic, TxListPtr will be cleaned */
1141	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1142	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1143
1144	/* free all tx skbuff */
1145	for (i = 0; i < TX_RING_SIZE; i++) {
1146		np->tx_ring[i].next_desc = 0;
1147
1148		skb = np->tx_skbuff[i];
1149		if (skb) {
1150			dma_unmap_single(&np->pci_dev->dev,
1151				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1152				skb->len, DMA_TO_DEVICE);
1153			dev_kfree_skb_any(skb);
1154			np->tx_skbuff[i] = NULL;
1155			dev->stats.tx_dropped++;
1156		}
1157	}
1158	np->cur_tx = np->dirty_tx = 0;
1159	np->cur_task = 0;
1160
1161	np->last_tx = NULL;
1162	iowrite8(127, ioaddr + TxDMAPollPeriod);
1163
1164	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1165	return 0;
1166}
1167
1168/* The interrupt handler cleans up after the Tx thread,
1169   and schedules Rx processing work */
1170static irqreturn_t intr_handler(int irq, void *dev_instance)
1171{
1172	struct net_device *dev = (struct net_device *)dev_instance;
1173	struct netdev_private *np = netdev_priv(dev);
1174	void __iomem *ioaddr = np->base;
1175	int hw_frame_id;
1176	int tx_cnt;
1177	int tx_status;
1178	int handled = 0;
1179	int i;
1180
1181	do {
1182		int intr_status = ioread16(ioaddr + IntrStatus);
1183		iowrite16(intr_status, ioaddr + IntrStatus);
1184
1185		if (netif_msg_intr(np))
1186			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1187				   dev->name, intr_status);
1188
1189		if (!(intr_status & DEFAULT_INTR))
1190			break;
1191
1192		handled = 1;
1193
1194		if (intr_status & (IntrRxDMADone)) {
1195			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1196					ioaddr + IntrEnable);
1197			if (np->budget < 0)
1198				np->budget = RX_BUDGET;
1199			tasklet_schedule(&np->rx_tasklet);
1200		}
1201		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1202			tx_status = ioread16 (ioaddr + TxStatus);
1203			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1204				if (netif_msg_tx_done(np))
1205					printk
1206					    ("%s: Transmit status is %2.2x.\n",
1207				     	dev->name, tx_status);
1208				if (tx_status & 0x1e) {
1209					if (netif_msg_tx_err(np))
1210						printk("%s: Transmit error status %4.4x.\n",
1211							   dev->name, tx_status);
1212					dev->stats.tx_errors++;
1213					if (tx_status & 0x10)
1214						dev->stats.tx_fifo_errors++;
1215					if (tx_status & 0x08)
1216						dev->stats.collisions++;
1217					if (tx_status & 0x04)
1218						dev->stats.tx_fifo_errors++;
1219					if (tx_status & 0x02)
1220						dev->stats.tx_window_errors++;
1221
1222					/*
1223					** This reset has been verified on
1224					** DFE-580TX boards ! phdm@macqel.be.
1225					*/
1226					if (tx_status & 0x10) {	/* TxUnderrun */
1227						/* Restart Tx FIFO and transmitter */
1228						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1229						/* No need to reset the Tx pointer here */
1230					}
1231					/* Restart the Tx. Need to make sure tx enabled */
1232					i = 10;
1233					do {
1234						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1235						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1236							break;
1237						mdelay(1);
1238					} while (--i);
1239				}
1240				/* Yup, this is a documentation bug.  It cost me *hours*. */
1241				iowrite16 (0, ioaddr + TxStatus);
1242				if (tx_cnt < 0) {
1243					iowrite32(5000, ioaddr + DownCounter);
1244					break;
1245				}
1246				tx_status = ioread16 (ioaddr + TxStatus);
1247			}
1248			hw_frame_id = (tx_status >> 8) & 0xff;
1249		} else 	{
1250			hw_frame_id = ioread8(ioaddr + TxFrameId);
1251		}
1252
1253		if (np->pci_dev->revision >= 0x14) {
1254			spin_lock(&np->lock);
1255			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1256				int entry = np->dirty_tx % TX_RING_SIZE;
1257				struct sk_buff *skb;
1258				int sw_frame_id;
1259				sw_frame_id = (le32_to_cpu(
1260					np->tx_ring[entry].status) >> 2) & 0xff;
1261				if (sw_frame_id == hw_frame_id &&
1262					!(le32_to_cpu(np->tx_ring[entry].status)
1263					& 0x00010000))
1264						break;
1265				if (sw_frame_id == (hw_frame_id + 1) %
1266					TX_RING_SIZE)
1267						break;
1268				skb = np->tx_skbuff[entry];
1269				/* Free the original skb. */
1270				dma_unmap_single(&np->pci_dev->dev,
1271					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1272					skb->len, DMA_TO_DEVICE);
1273				dev_consume_skb_irq(np->tx_skbuff[entry]);
1274				np->tx_skbuff[entry] = NULL;
1275				np->tx_ring[entry].frag[0].addr = 0;
1276				np->tx_ring[entry].frag[0].length = 0;
1277			}
1278			spin_unlock(&np->lock);
1279		} else {
1280			spin_lock(&np->lock);
1281			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1282				int entry = np->dirty_tx % TX_RING_SIZE;
1283				struct sk_buff *skb;
1284				if (!(le32_to_cpu(np->tx_ring[entry].status)
1285							& 0x00010000))
1286					break;
1287				skb = np->tx_skbuff[entry];
1288				/* Free the original skb. */
1289				dma_unmap_single(&np->pci_dev->dev,
1290					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1291					skb->len, DMA_TO_DEVICE);
1292				dev_consume_skb_irq(np->tx_skbuff[entry]);
1293				np->tx_skbuff[entry] = NULL;
1294				np->tx_ring[entry].frag[0].addr = 0;
1295				np->tx_ring[entry].frag[0].length = 0;
1296			}
1297			spin_unlock(&np->lock);
1298		}
1299
1300		if (netif_queue_stopped(dev) &&
1301			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1302			/* The ring is no longer full, clear busy flag. */
1303			netif_wake_queue (dev);
1304		}
1305		/* Abnormal error summary/uncommon events handlers. */
1306		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1307			netdev_error(dev, intr_status);
1308	} while (0);
1309	if (netif_msg_intr(np))
1310		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1311			   dev->name, ioread16(ioaddr + IntrStatus));
1312	return IRQ_RETVAL(handled);
1313}
1314
1315static void rx_poll(unsigned long data)
1316{
1317	struct net_device *dev = (struct net_device *)data;
1318	struct netdev_private *np = netdev_priv(dev);
1319	int entry = np->cur_rx % RX_RING_SIZE;
1320	int boguscnt = np->budget;
1321	void __iomem *ioaddr = np->base;
1322	int received = 0;
1323
1324	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1325	while (1) {
1326		struct netdev_desc *desc = &(np->rx_ring[entry]);
1327		u32 frame_status = le32_to_cpu(desc->status);
1328		int pkt_len;
1329
1330		if (--boguscnt < 0) {
1331			goto not_done;
1332		}
1333		if (!(frame_status & DescOwn))
1334			break;
1335		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1336		if (netif_msg_rx_status(np))
1337			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1338				   frame_status);
1339		if (frame_status & 0x001f4000) {
1340			/* There was an error. */
1341			if (netif_msg_rx_err(np))
1342				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1343					   frame_status);
1344			dev->stats.rx_errors++;
1345			if (frame_status & 0x00100000)
1346				dev->stats.rx_length_errors++;
1347			if (frame_status & 0x00010000)
1348				dev->stats.rx_fifo_errors++;
1349			if (frame_status & 0x00060000)
1350				dev->stats.rx_frame_errors++;
1351			if (frame_status & 0x00080000)
1352				dev->stats.rx_crc_errors++;
1353			if (frame_status & 0x00100000) {
1354				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1355					   " status %8.8x.\n",
1356					   dev->name, frame_status);
1357			}
1358		} else {
1359			struct sk_buff *skb;
1360#ifndef final_version
1361			if (netif_msg_rx_status(np))
1362				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1363					   ", bogus_cnt %d.\n",
1364					   pkt_len, boguscnt);
1365#endif
1366			/* Check if the packet is long enough to accept without copying
1367			   to a minimally-sized skbuff. */
1368			if (pkt_len < rx_copybreak &&
1369			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1370				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1371				dma_sync_single_for_cpu(&np->pci_dev->dev,
1372						le32_to_cpu(desc->frag[0].addr),
1373						np->rx_buf_sz, DMA_FROM_DEVICE);
1374				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1375				dma_sync_single_for_device(&np->pci_dev->dev,
1376						le32_to_cpu(desc->frag[0].addr),
1377						np->rx_buf_sz, DMA_FROM_DEVICE);
1378				skb_put(skb, pkt_len);
1379			} else {
1380				dma_unmap_single(&np->pci_dev->dev,
1381					le32_to_cpu(desc->frag[0].addr),
1382					np->rx_buf_sz, DMA_FROM_DEVICE);
1383				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1384				np->rx_skbuff[entry] = NULL;
1385			}
1386			skb->protocol = eth_type_trans(skb, dev);
1387			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1388			netif_rx(skb);
1389		}
1390		entry = (entry + 1) % RX_RING_SIZE;
1391		received++;
1392	}
1393	np->cur_rx = entry;
1394	refill_rx (dev);
1395	np->budget -= received;
1396	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1397	return;
1398
1399not_done:
1400	np->cur_rx = entry;
1401	refill_rx (dev);
1402	if (!received)
1403		received = 1;
1404	np->budget -= received;
1405	if (np->budget <= 0)
1406		np->budget = RX_BUDGET;
1407	tasklet_schedule(&np->rx_tasklet);
1408}
1409
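/*
 * Replenish the Rx ring between dirty_rx and cur_rx: allocate a fresh skb
 * for every empty slot, DMA-map it and hand the descriptor back to the chip.
 * An allocation or mapping failure simply stops the refill; the slot is
 * retried on the next pass.
 */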
1410static void refill_rx (struct net_device *dev)
1411{
1412	struct netdev_private *np = netdev_priv(dev);
1413	int entry;
1414	int cnt = 0;
1415
1416	/* Refill the Rx ring buffers. */
1417	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1418		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1419		struct sk_buff *skb;
1420		entry = np->dirty_rx % RX_RING_SIZE;
1421		if (np->rx_skbuff[entry] == NULL) {
1422			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1423			np->rx_skbuff[entry] = skb;
1424			if (skb == NULL)
1425				break;		/* Better luck next round. */
1426			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1427			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1428				dma_map_single(&np->pci_dev->dev, skb->data,
1429					np->rx_buf_sz, DMA_FROM_DEVICE));
1430			if (dma_mapping_error(&np->pci_dev->dev,
1431				    np->rx_ring[entry].frag[0].addr)) {
1432			    dev_kfree_skb_irq(skb);
1433			    np->rx_skbuff[entry] = NULL;
1434			    break;
1435			}
1436		}
1437		/* Perhaps we need not reset this field. */
1438		np->rx_ring[entry].frag[0].length =
1439			cpu_to_le32(np->rx_buf_sz | LastFrag);
1440		np->rx_ring[entry].status = 0;
1441		cnt++;
1442	}
1443}
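
/*
 * Handle the "uncommon event" interrupt sources: link changes (re-read the
 * negotiated speed/duplex and update flow control), statistics counter
 * overflow, and PCI bus errors (currently only reported).
 */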
1444static void netdev_error(struct net_device *dev, int intr_status)
1445{
1446	struct netdev_private *np = netdev_priv(dev);
1447	void __iomem *ioaddr = np->base;
1448	u16 mii_ctl, mii_advertise, mii_lpa;
1449	int speed;
1450
1451	if (intr_status & LinkChange) {
1452		if (mdio_wait_link(dev, 10) == 0) {
1453			printk(KERN_INFO "%s: Link up\n", dev->name);
1454			if (np->an_enable) {
1455				mii_advertise = mdio_read(dev, np->phys[0],
1456							   MII_ADVERTISE);
1457				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1458				mii_advertise &= mii_lpa;
1459				printk(KERN_INFO "%s: Link changed: ",
1460					dev->name);
1461				if (mii_advertise & ADVERTISE_100FULL) {
1462					np->speed = 100;
1463					printk("100Mbps, full duplex\n");
1464				} else if (mii_advertise & ADVERTISE_100HALF) {
1465					np->speed = 100;
1466					printk("100Mbps, half duplex\n");
1467				} else if (mii_advertise & ADVERTISE_10FULL) {
1468					np->speed = 10;
1469					printk("10Mbps, full duplex\n");
1470				} else if (mii_advertise & ADVERTISE_10HALF) {
1471					np->speed = 10;
1472					printk("10Mbps, half duplex\n");
1473				} else
1474					printk("\n");
1475
1476			} else {
1477				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1478				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1479				np->speed = speed;
1480				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1481					dev->name, speed);
1482				printk("%s duplex.\n",
1483					(mii_ctl & BMCR_FULLDPLX) ?
1484						"full" : "half");
1485			}
1486			check_duplex(dev);
1487			if (np->flowctrl && np->mii_if.full_duplex) {
1488				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1489					ioaddr + MulticastFilter1+2);
1490				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1491					ioaddr + MACCtrl0);
1492			}
1493			netif_carrier_on(dev);
1494		} else {
1495			printk(KERN_INFO "%s: Link down\n", dev->name);
1496			netif_carrier_off(dev);
1497		}
1498	}
1499	if (intr_status & StatsMax) {
1500		get_stats(dev);
1501	}
1502	if (intr_status & IntrPCIErr) {
1503		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1504			   dev->name, intr_status);
1505		/* We must do a global reset of DMA to continue. */
1506	}
1507}
1508
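/*
 * Fold the chip's narrow (8/16-bit) statistics registers into dev->stats
 * and the ethtool extended counters.  statlock serializes the read-and-
 * accumulate sequence against concurrent callers.
 */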
1509static struct net_device_stats *get_stats(struct net_device *dev)
1510{
1511	struct netdev_private *np = netdev_priv(dev);
1512	void __iomem *ioaddr = np->base;
1513	unsigned long flags;
1514	u8 late_coll, single_coll, mult_coll;
1515
1516	spin_lock_irqsave(&np->statlock, flags);
1517	/* The chip only needs to report frames it silently dropped. */
1518	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1519	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1520	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1521	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1522
1523	mult_coll = ioread8(ioaddr + StatsMultiColl);
1524	np->xstats.tx_multiple_collisions += mult_coll;
1525	single_coll = ioread8(ioaddr + StatsOneColl);
1526	np->xstats.tx_single_collisions += single_coll;
1527	late_coll = ioread8(ioaddr + StatsLateColl);
1528	np->xstats.tx_late_collisions += late_coll;
1529	dev->stats.collisions += mult_coll
1530		+ single_coll
1531		+ late_coll;
1532
1533	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1534	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1535	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1536	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1537	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1538	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1539	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1540
1541	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1542	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1543	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1544	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1545
1546	spin_unlock_irqrestore(&np->statlock, flags);
1547
1548	return &dev->stats;
1549}
1550
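/*
 * Program the receive filter: promiscuous mode, accept-all-multicast, a
 * 64-bin multicast hash, or unicast/broadcast only.  The hash index is the
 * top six bits of the little-endian CRC-32 of the address, taken MSB first,
 * as in:
 *
 *	index = 0;
 *	for (bit = 0; bit < 6; bit++, crc <<= 1)
 *		if (crc & 0x80000000)
 *			index |= 1 << bit;
 *
 * which selects one bit in the four 16-bit MulticastFilter registers.
 */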
1551static void set_rx_mode(struct net_device *dev)
1552{
1553	struct netdev_private *np = netdev_priv(dev);
1554	void __iomem *ioaddr = np->base;
1555	u16 mc_filter[4];			/* Multicast hash filter */
1556	u32 rx_mode;
1557	int i;
1558
1559	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1560		memset(mc_filter, 0xff, sizeof(mc_filter));
1561		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1562	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1563		   (dev->flags & IFF_ALLMULTI)) {
1564		/* Too many to match, or accept all multicasts. */
1565		memset(mc_filter, 0xff, sizeof(mc_filter));
1566		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1567	} else if (!netdev_mc_empty(dev)) {
1568		struct netdev_hw_addr *ha;
1569		int bit;
1570		int index;
1571		int crc;
1572		memset (mc_filter, 0, sizeof (mc_filter));
1573		netdev_for_each_mc_addr(ha, dev) {
1574			crc = ether_crc_le(ETH_ALEN, ha->addr);
1575			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1576				if (crc & 0x80000000) index |= 1 << bit;
1577			mc_filter[index/16] |= (1 << (index % 16));
1578		}
1579		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1580	} else {
1581		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1582		return;
1583	}
1584	if (np->mii_if.full_duplex && np->flowctrl)
1585		mc_filter[3] |= 0x0200;
1586
1587	for (i = 0; i < 4; i++)
1588		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1589	iowrite8(rx_mode, ioaddr + RxMode);
1590}
1591
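/* Write the 6-byte station address to the chip as three little-endian
   16-bit words. */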
1592static int __set_mac_addr(struct net_device *dev)
1593{
1594	struct netdev_private *np = netdev_priv(dev);
1595	u16 addr16;
1596
1597	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1598	iowrite16(addr16, np->base + StationAddr);
1599	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1600	iowrite16(addr16, np->base + StationAddr+2);
1601	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1602	iowrite16(addr16, np->base + StationAddr+4);
1603	return 0;
1604}
1605
1606/* Invoked with rtnl_lock held */
1607static int sundance_set_mac_addr(struct net_device *dev, void *data)
1608{
1609	const struct sockaddr *addr = data;
1610
1611	if (!is_valid_ether_addr(addr->sa_data))
1612		return -EADDRNOTAVAIL;
1613	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1614	__set_mac_addr(dev);
1615
1616	return 0;
1617}
1618
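/* Names for the ethtool extended statistics; the order must match the
   values filled in by get_ethtool_stats() below. */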
1619static const struct {
1620	const char name[ETH_GSTRING_LEN];
1621} sundance_stats[] = {
1622	{ "tx_multiple_collisions" },
1623	{ "tx_single_collisions" },
1624	{ "tx_late_collisions" },
1625	{ "tx_deferred" },
1626	{ "tx_deferred_excessive" },
1627	{ "tx_aborted" },
1628	{ "tx_bcasts" },
1629	{ "rx_bcasts" },
1630	{ "tx_mcasts" },
1631	{ "rx_mcasts" },
1632};
1633
1634static int check_if_running(struct net_device *dev)
1635{
1636	if (!netif_running(dev))
1637		return -EINVAL;
1638	return 0;
1639}
1640
1641static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1642{
1643	struct netdev_private *np = netdev_priv(dev);
1644	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1645	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1646}
1647
1648static int get_link_ksettings(struct net_device *dev,
1649			      struct ethtool_link_ksettings *cmd)
1650{
1651	struct netdev_private *np = netdev_priv(dev);
1652	spin_lock_irq(&np->lock);
1653	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1654	spin_unlock_irq(&np->lock);
1655	return 0;
1656}
1657
1658static int set_link_ksettings(struct net_device *dev,
1659			      const struct ethtool_link_ksettings *cmd)
1660{
1661	struct netdev_private *np = netdev_priv(dev);
1662	int res;
1663	spin_lock_irq(&np->lock);
1664	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1665	spin_unlock_irq(&np->lock);
1666	return res;
1667}
1668
1669static int nway_reset(struct net_device *dev)
1670{
1671	struct netdev_private *np = netdev_priv(dev);
1672	return mii_nway_restart(&np->mii_if);
1673}
1674
1675static u32 get_link(struct net_device *dev)
1676{
1677	struct netdev_private *np = netdev_priv(dev);
1678	return mii_link_ok(&np->mii_if);
1679}
1680
1681static u32 get_msglevel(struct net_device *dev)
1682{
1683	struct netdev_private *np = netdev_priv(dev);
1684	return np->msg_enable;
1685}
1686
1687static void set_msglevel(struct net_device *dev, u32 val)
1688{
1689	struct netdev_private *np = netdev_priv(dev);
1690	np->msg_enable = val;
1691}
1692
1693static void get_strings(struct net_device *dev, u32 stringset,
1694		u8 *data)
1695{
1696	if (stringset == ETH_SS_STATS)
1697		memcpy(data, sundance_stats, sizeof(sundance_stats));
1698}
1699
1700static int get_sset_count(struct net_device *dev, int sset)
1701{
1702	switch (sset) {
1703	case ETH_SS_STATS:
1704		return ARRAY_SIZE(sundance_stats);
1705	default:
1706		return -EOPNOTSUPP;
1707	}
1708}
1709
1710static void get_ethtool_stats(struct net_device *dev,
1711		struct ethtool_stats *stats, u64 *data)
1712{
1713	struct netdev_private *np = netdev_priv(dev);
1714	int i = 0;
1715
1716	get_stats(dev);
1717	data[i++] = np->xstats.tx_multiple_collisions;
1718	data[i++] = np->xstats.tx_single_collisions;
1719	data[i++] = np->xstats.tx_late_collisions;
1720	data[i++] = np->xstats.tx_deferred;
1721	data[i++] = np->xstats.tx_deferred_excessive;
1722	data[i++] = np->xstats.tx_aborted;
1723	data[i++] = np->xstats.tx_bcasts;
1724	data[i++] = np->xstats.rx_bcasts;
1725	data[i++] = np->xstats.tx_mcasts;
1726	data[i++] = np->xstats.rx_mcasts;
1727}
1728
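/* Wake-on-LAN support is only compiled in when power management is enabled;
   otherwise the ethtool WoL hooks below degrade to NULL. */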
1729#ifdef CONFIG_PM
1730
1731static void sundance_get_wol(struct net_device *dev,
1732		struct ethtool_wolinfo *wol)
1733{
1734	struct netdev_private *np = netdev_priv(dev);
1735	void __iomem *ioaddr = np->base;
1736	u8 wol_bits;
1737
1738	wol->wolopts = 0;
1739
1740	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1741	if (!np->wol_enabled)
1742		return;
1743
1744	wol_bits = ioread8(ioaddr + WakeEvent);
1745	if (wol_bits & MagicPktEnable)
1746		wol->wolopts |= WAKE_MAGIC;
1747	if (wol_bits & LinkEventEnable)
1748		wol->wolopts |= WAKE_PHY;
1749}
1750
1751static int sundance_set_wol(struct net_device *dev,
1752	struct ethtool_wolinfo *wol)
1753{
1754	struct netdev_private *np = netdev_priv(dev);
1755	void __iomem *ioaddr = np->base;
1756	u8 wol_bits;
1757
1758	if (!device_can_wakeup(&np->pci_dev->dev))
1759		return -EOPNOTSUPP;
1760
1761	np->wol_enabled = !!(wol->wolopts);
1762	wol_bits = ioread8(ioaddr + WakeEvent);
1763	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1764			LinkEventEnable | WolEnable);
1765
1766	if (np->wol_enabled) {
1767		if (wol->wolopts & WAKE_MAGIC)
1768			wol_bits |= (MagicPktEnable | WolEnable);
1769		if (wol->wolopts & WAKE_PHY)
1770			wol_bits |= (LinkEventEnable | WolEnable);
1771	}
1772	iowrite8(wol_bits, ioaddr + WakeEvent);
1773
1774	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1775
1776	return 0;
1777}
1778#else
1779#define sundance_get_wol NULL
1780#define sundance_set_wol NULL
1781#endif /* CONFIG_PM */
1782
1783static const struct ethtool_ops ethtool_ops = {
1784	.begin = check_if_running,
1785	.get_drvinfo = get_drvinfo,
1786	.nway_reset = nway_reset,
1787	.get_link = get_link,
1788	.get_wol = sundance_get_wol,
1789	.set_wol = sundance_set_wol,
1790	.get_msglevel = get_msglevel,
1791	.set_msglevel = set_msglevel,
1792	.get_strings = get_strings,
1793	.get_sset_count = get_sset_count,
1794	.get_ethtool_stats = get_ethtool_stats,
1795	.get_link_ksettings = get_link_ksettings,
1796	.set_link_ksettings = set_link_ksettings,
1797};
1798
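/* MII ioctls (SIOCGMIIPHY and friends) are delegated to the generic MII
   layer, serialized against the interrupt handler by np->lock. */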
1799static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1800{
1801	struct netdev_private *np = netdev_priv(dev);
1802	int rc;
1803
1804	if (!netif_running(dev))
1805		return -EINVAL;
1806
1807	spin_lock_irq(&np->lock);
1808	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1809	spin_unlock_irq(&np->lock);
1810
1811	return rc;
1812}
1813
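/*
 * Shut the interface down: kill the Rx/Tx tasklets, mask interrupts, stop
 * the DMA engines and MAC, issue a global reset, then free the IRQ, the
 * timer and every skb still held by the rings.
 */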
1814static int netdev_close(struct net_device *dev)
1815{
1816	struct netdev_private *np = netdev_priv(dev);
1817	void __iomem *ioaddr = np->base;
1818	struct sk_buff *skb;
1819	int i;
1820
1821	/* Wait for and kill the Rx and Tx tasklets */
1822	tasklet_kill(&np->rx_tasklet);
1823	tasklet_kill(&np->tx_tasklet);
1824	np->cur_tx = 0;
1825	np->dirty_tx = 0;
1826	np->cur_task = 0;
1827	np->last_tx = NULL;
1828
1829	netif_stop_queue(dev);
1830
1831	if (netif_msg_ifdown(np)) {
1832		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1833			   "Rx %4.4x Int %2.2x.\n",
1834			   dev->name, ioread8(ioaddr + TxStatus),
1835			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1836		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1837			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1838	}
1839
1840	/* Disable interrupts by clearing the interrupt mask. */
1841	iowrite16(0x0000, ioaddr + IntrEnable);
1842
1843	/* Disable Rx and Tx DMA so resources can be released safely */
1844	iowrite32(0x500, ioaddr + DMACtrl);
1845
1846	/* Stop the chip's Tx and Rx processes. */
1847	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1848
1849	for (i = 2000; i > 0; i--) {
1850		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1851			break;
1852		mdelay(1);
1853	}
1854
1855	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1856			ioaddr + ASIC_HI_WORD(ASICCtrl));
1857
1858	for (i = 2000; i > 0; i--) {
1859		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1860			break;
1861		mdelay(1);
1862	}
1863
1864#ifdef __i386__
1865	if (netif_msg_hw(np)) {
1866		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1867			   (int)(np->tx_ring_dma));
1868		for (i = 0; i < TX_RING_SIZE; i++)
1869			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1870				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1871				   np->tx_ring[i].frag[0].length);
1872		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1873			   (int)(np->rx_ring_dma));
1874		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1875			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1876				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1877				   np->rx_ring[i].frag[0].length);
1878		}
1879	}
1880#endif /* __i386__ debugging only */
1881
1882	free_irq(np->pci_dev->irq, dev);
1883
1884	del_timer_sync(&np->timer);
1885
1886	/* Free all the skbuffs in the Rx queue. */
1887	for (i = 0; i < RX_RING_SIZE; i++) {
1888		np->rx_ring[i].status = 0;
1889		skb = np->rx_skbuff[i];
1890		if (skb) {
1891			dma_unmap_single(&np->pci_dev->dev,
1892				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1893				np->rx_buf_sz, DMA_FROM_DEVICE);
1894			dev_kfree_skb(skb);
1895			np->rx_skbuff[i] = NULL;
1896		}
1897		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1898	}
1899	for (i = 0; i < TX_RING_SIZE; i++) {
1900		np->tx_ring[i].next_desc = 0;
1901		skb = np->tx_skbuff[i];
1902		if (skb) {
1903			dma_unmap_single(&np->pci_dev->dev,
1904				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1905				skb->len, DMA_TO_DEVICE);
1906			dev_kfree_skb(skb);
1907			np->tx_skbuff[i] = NULL;
1908		}
1909	}
1910
1911	return 0;
1912}
1913
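/* PCI removal: unregister the netdev, free both descriptor rings, and
   release the MMIO mapping and PCI regions acquired at probe time. */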
1914static void sundance_remove1(struct pci_dev *pdev)
1915{
1916	struct net_device *dev = pci_get_drvdata(pdev);
1917
1918	if (dev) {
1919	    struct netdev_private *np = netdev_priv(dev);
1920	    unregister_netdev(dev);
1921	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1922		    np->rx_ring, np->rx_ring_dma);
1923	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1924		    np->tx_ring, np->tx_ring_dma);
1925	    pci_iounmap(pdev, np->base);
1926	    pci_release_regions(pdev);
1927	    free_netdev(dev);
1928	}
1929}
1930
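/*
 * Suspend: close the interface and detach it from the stack; if Wake-on-LAN
 * was requested the receiver is left enabled so the configured wake events
 * can still fire.  Resume re-runs netdev_open() and re-attaches the device.
 */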
1931static int __maybe_unused sundance_suspend(struct device *dev_d)
1932{
1933	struct net_device *dev = dev_get_drvdata(dev_d);
1934	struct netdev_private *np = netdev_priv(dev);
1935	void __iomem *ioaddr = np->base;
1936
1937	if (!netif_running(dev))
1938		return 0;
1939
1940	netdev_close(dev);
1941	netif_device_detach(dev);
1942
1943	if (np->wol_enabled) {
1944		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1945		iowrite16(RxEnable, ioaddr + MACCtrl1);
1946	}
1947
1948	device_set_wakeup_enable(dev_d, np->wol_enabled);
1949
1950	return 0;
1951}
1952
1953static int __maybe_unused sundance_resume(struct device *dev_d)
1954{
1955	struct net_device *dev = dev_get_drvdata(dev_d);
1956	int err = 0;
1957
1958	if (!netif_running(dev))
1959		return 0;
1960
1961	err = netdev_open(dev);
1962	if (err) {
1963		printk(KERN_ERR "%s: Can't resume interface!\n",
1964				dev->name);
1965		goto out;
1966	}
1967
1968	netif_device_attach(dev);
1969
1970out:
1971	return err;
1972}
1973
1974static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
1975
1976static struct pci_driver sundance_driver = {
1977	.name		= DRV_NAME,
1978	.id_table	= sundance_pci_tbl,
1979	.probe		= sundance_probe1,
1980	.remove		= sundance_remove1,
1981	.driver.pm	= &sundance_pm_ops,
1982};
1983
1984static int __init sundance_init(void)
1985{
1986	return pci_register_driver(&sundance_driver);
1987}
1988
1989static void __exit sundance_exit(void)
1990{
1991	pci_unregister_driver(&sundance_driver);
1992}
1993
1994module_init(sundance_init);
1995module_exit(sundance_exit);
1996
1997