   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	https://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26
  27/* The user-configurable values.
  28   These may be modified when a driver module is loaded.*/
  29static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  30/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  31   Typical is a 64 element hash table based on the Ethernet CRC.  */
  32static const int multicast_filter_limit = 32;
  33
  34/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  35   Setting to > 1518 effectively disables this feature.
  36   This chip can receive into offset buffers, so the Alpha does not
  37   need a copy-align. */
  38static int rx_copybreak;
  39static int flowctrl=1;
  40
  41/* media[] specifies the media type the NIC operates at.
  42		 autosense	Autosensing active media.
  43		 10mbps_hd 	10Mbps half duplex.
  44		 10mbps_fd 	10Mbps full duplex.
  45		 100mbps_hd 	100Mbps half duplex.
  46		 100mbps_fd 	100Mbps full duplex.
  47		 0		Autosensing active media.
  48		 1	 	10Mbps half duplex.
  49		 2	 	10Mbps full duplex.
  50		 3	 	100Mbps half duplex.
  51		 4	 	100Mbps full duplex.
  52*/
  53#define MAX_UNITS 8
  54static char *media[MAX_UNITS];
  55
  56
  57/* Operational parameters that are set at compile time. */
  58
  59/* Keep the ring sizes a power of two for compile efficiency.
  60   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  61   Making the Tx ring too large decreases the effectiveness of channel
  62   bonding and packet priority, and more than 128 requires modifying the
  63   Tx error recovery.
  64   Large receive rings merely waste memory. */
  65#define TX_RING_SIZE	32
  66#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  67#define RX_RING_SIZE	64
  68#define RX_BUDGET	32
   69#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
   70#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
  71
  72/* Operational parameters that usually are not changed. */
  73/* Time in jiffies before concluding the transmitter is hung. */
  74#define TX_TIMEOUT  (4*HZ)
  75#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  76
  77/* Include files, designed to support most kernel versions 2.0.0 and later. */
  78#include <linux/module.h>
  79#include <linux/kernel.h>
  80#include <linux/string.h>
  81#include <linux/timer.h>
  82#include <linux/errno.h>
  83#include <linux/ioport.h>
  84#include <linux/interrupt.h>
  85#include <linux/pci.h>
  86#include <linux/netdevice.h>
  87#include <linux/etherdevice.h>
  88#include <linux/skbuff.h>
  89#include <linux/init.h>
  90#include <linux/bitops.h>
  91#include <linux/uaccess.h>
  92#include <asm/processor.h>		/* Processor type for cache alignment. */
  93#include <asm/io.h>
  94#include <linux/delay.h>
  95#include <linux/spinlock.h>
  96#include <linux/dma-mapping.h>
  97#include <linux/crc32.h>
  98#include <linux/ethtool.h>
  99#include <linux/mii.h>
 100
 101MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 102MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 103MODULE_LICENSE("GPL");
 104
 105module_param(debug, int, 0);
 106module_param(rx_copybreak, int, 0);
 107module_param_array(media, charp, NULL, 0);
 108module_param(flowctrl, int, 0);
 109MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 110MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 111MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
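/* A usage sketch (assuming the module is built as sundance.ko); the option
   names match the module_param()/module_param_array() definitions above:
       modprobe sundance debug=3 rx_copybreak=100 flowctrl=1 media=100mbps_fd
   media= also accepts the numeric forms (0-4) listed in the media[] comment
   above. */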
 112
 113/*
 114				Theory of Operation
 115
 116I. Board Compatibility
 117
 118This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 119
 120II. Board-specific settings
 121
 122III. Driver operation
 123
 124IIIa. Ring buffers
 125
 126This driver uses two statically allocated fixed-size descriptor lists
 127formed into rings by a branch from the final descriptor to the beginning of
 128the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 129Some chips explicitly use only 2^N sized rings, while others use a
 130'next descriptor' pointer that the driver forms into rings.
 131
 132IIIb/c. Transmit/Receive Structure
 133
 134This driver uses a zero-copy receive and transmit scheme.
 135The driver allocates full frame size skbuffs for the Rx ring buffers at
 136open() time and passes the skb->data field to the chip as receive data
 137buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 138a fresh skbuff is allocated and the frame is copied to the new skbuff.
 139When the incoming frame is larger, the skbuff is passed directly up the
 140protocol stack.  Buffers consumed this way are replaced by newly allocated
 141skbuffs in a later phase of receives.
 142
 143The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 144using a full-sized skbuff for small frames vs. the copying costs of larger
 145frames.  New boards are typically used in generously configured machines
 146and the underfilled buffers have negligible impact compared to the benefit of
 147a single allocation size, so the default value of zero results in never
 148copying packets.  When copying is done, the cost is usually mitigated by using
 149a combined copy/checksum routine.  Copying also preloads the cache, which is
 150most useful with small frames.
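
A rough sketch of the decision as made in rx_poll() below (not verbatim
code):

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL)
		copy the frame into the fresh skb, leaving the ring buffer
		mapped in place for reuse;
	else
		unmap the ring buffer and pass the original skb up the stack.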
 151
 152A subtle aspect of the operation is that the IP header at offset 14 in an
 153ethernet frame isn't longword aligned for further processing.
 154Unaligned buffers are permitted by the Sundance hardware, so
 155frames are received into the skbuff at an offset of "+2", 16-byte aligning
 156the IP header.
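(In the code below this is the skb_reserve(skb, 2) done in init_ring() and
refill_rx(), plus the pkt_len + 2 allocation in the copybreak path of
rx_poll().)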
 157
 158IIId. Synchronization
 159
 160The driver runs as two independent, single-threaded flows of control.  One
 161is the send-packet routine, which enforces single-threaded use by the
 162dev->tbusy flag.  The other thread is the interrupt handler, which is single
 163threaded by the hardware and interrupt handling software.
 164
 165The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 166flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  167	queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 168the 'lp->tx_full' flag.
 169
 170The interrupt handler has exclusive control over the Rx ring and records stats
 171from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 172empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 173clears both the tx_full and tbusy flags.
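(Roughly speaking, in the current code the tbusy/tx_full handling described
above corresponds to netif_stop_queue()/netif_wake_queue() and the
cur_tx/dirty_tx bookkeeping in start_tx() and intr_handler() below.)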
 174
 175IV. Notes
 176
 177IVb. References
 178
 179The Sundance ST201 datasheet, preliminary version.
 180The Kendin KS8723 datasheet, preliminary version.
 181The ICplus IP100 datasheet, preliminary version.
 182http://www.scyld.com/expert/100mbps.html
 183http://www.scyld.com/expert/NWay.html
 184
 185IVc. Errata
 186
 187*/
 188
 189/* Work-around for Kendin chip bugs. */
 190#ifndef CONFIG_SUNDANCE_MMIO
 191#define USE_IO_OPS 1
 192#endif
 193
 194static const struct pci_device_id sundance_pci_tbl[] = {
 195	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 196	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 197	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 198	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 199	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 200	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 201	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 202	{ }
 203};
 204MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 205
 206enum {
 207	netdev_io_size = 128
 208};
 209
 210struct pci_id_info {
 211        const char *name;
 212};
 213static const struct pci_id_info pci_id_tbl[] = {
 214	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 215	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 216	{"D-Link DFE-580TX 4 port Server Adapter"},
 217	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 218	{"D-Link DL10050-based FAST Ethernet Adapter"},
 219	{"Sundance Technology Alta"},
 220	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 221	{ }	/* terminate list. */
 222};
 223
 224/* This driver was written to use PCI memory space, however x86-oriented
 225   hardware often uses I/O space accesses. */
 226
 227/* Offsets to the device registers.
 228   Unlike software-only systems, device drivers interact with complex hardware.
 229   It's not useful to define symbolic names for every register bit in the
 230   device.  The name can only partially document the semantics and make
 231   the driver longer and more difficult to read.
 232   In general, only the important configuration values or bits changed
 233   multiple times should be defined symbolically.
 234*/
 235enum alta_offsets {
 236	DMACtrl = 0x00,
 237	TxListPtr = 0x04,
 238	TxDMABurstThresh = 0x08,
 239	TxDMAUrgentThresh = 0x09,
 240	TxDMAPollPeriod = 0x0a,
 241	RxDMAStatus = 0x0c,
 242	RxListPtr = 0x10,
 243	DebugCtrl0 = 0x1a,
 244	DebugCtrl1 = 0x1c,
 245	RxDMABurstThresh = 0x14,
 246	RxDMAUrgentThresh = 0x15,
 247	RxDMAPollPeriod = 0x16,
 248	LEDCtrl = 0x1a,
 249	ASICCtrl = 0x30,
 250	EEData = 0x34,
 251	EECtrl = 0x36,
 252	FlashAddr = 0x40,
 253	FlashData = 0x44,
 254	WakeEvent = 0x45,
 255	TxStatus = 0x46,
 256	TxFrameId = 0x47,
 257	DownCounter = 0x18,
 258	IntrClear = 0x4a,
 259	IntrEnable = 0x4c,
 260	IntrStatus = 0x4e,
 261	MACCtrl0 = 0x50,
 262	MACCtrl1 = 0x52,
 263	StationAddr = 0x54,
 264	MaxFrameSize = 0x5A,
 265	RxMode = 0x5c,
 266	MIICtrl = 0x5e,
 267	MulticastFilter0 = 0x60,
 268	MulticastFilter1 = 0x64,
 269	RxOctetsLow = 0x68,
 270	RxOctetsHigh = 0x6a,
 271	TxOctetsLow = 0x6c,
 272	TxOctetsHigh = 0x6e,
 273	TxFramesOK = 0x70,
 274	RxFramesOK = 0x72,
 275	StatsCarrierError = 0x74,
 276	StatsLateColl = 0x75,
 277	StatsMultiColl = 0x76,
 278	StatsOneColl = 0x77,
 279	StatsTxDefer = 0x78,
 280	RxMissed = 0x79,
 281	StatsTxXSDefer = 0x7a,
 282	StatsTxAbort = 0x7b,
 283	StatsBcastTx = 0x7c,
 284	StatsBcastRx = 0x7d,
 285	StatsMcastTx = 0x7e,
 286	StatsMcastRx = 0x7f,
 287	/* Aliased and bogus values! */
 288	RxStatus = 0x0c,
 289};
 290
 291#define ASIC_HI_WORD(x)	((x) + 2)
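/* Offset of the high 16-bit half of a 32-bit register such as ASICCtrl;
   the reset bits enumerated below live in that high word. */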
 292
 293enum ASICCtrl_HiWord_bit {
 294	GlobalReset = 0x0001,
 295	RxReset = 0x0002,
 296	TxReset = 0x0004,
 297	DMAReset = 0x0008,
 298	FIFOReset = 0x0010,
 299	NetworkReset = 0x0020,
 300	HostReset = 0x0040,
 301	ResetBusy = 0x0400,
 302};
 303
 304/* Bits in the interrupt status/mask registers. */
 305enum intr_status_bits {
 306	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 307	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 308	IntrDrvRqst=0x0040,
 309	StatsMax=0x0080, LinkChange=0x0100,
 310	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 311};
 312
 313/* Bits in the RxMode register. */
 314enum rx_mode_bits {
 315	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 316	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 317};
 318/* Bits in MACCtrl. */
 319enum mac_ctrl0_bits {
 320	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 321	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 322};
 323enum mac_ctrl1_bits {
 324	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 325	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 326	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 327};
 328
 329/* Bits in WakeEvent register. */
 330enum wake_event_bits {
 331	WakePktEnable = 0x01,
 332	MagicPktEnable = 0x02,
 333	LinkEventEnable = 0x04,
 334	WolEnable = 0x80,
 335};
 336
 337/* The Rx and Tx buffer descriptors. */
 338/* Note that using only 32 bit fields simplifies conversion to big-endian
 339   architectures. */
 340struct netdev_desc {
 341	__le32 next_desc;
 342	__le32 status;
 343	struct desc_frag { __le32 addr, length; } frag;
 344};
 345
 346/* Bits in netdev_desc.status */
 347enum desc_status_bits {
 348	DescOwn=0x8000,
 349	DescEndPacket=0x4000,
 350	DescEndRing=0x2000,
 351	LastFrag=0x80000000,
 352	DescIntrOnTx=0x8000,
 353	DescIntrOnDMADone=0x80000000,
 354	DisableAlign = 0x00000001,
 355};
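/* Note: as used in start_tx() and intr_handler() below, bits 2..9 of a Tx
   descriptor's status double as a software frame id:
   status = (entry << 2) | DisableAlign, read back as (status >> 2) & 0xff. */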
 356
 357#define PRIV_ALIGN	15 	/* Required alignment mask */
 358/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 359   within the structure. */
 360#define MII_CNT		4
 361struct netdev_private {
 362	/* Descriptor rings first for alignment. */
 363	struct netdev_desc *rx_ring;
 364	struct netdev_desc *tx_ring;
 365	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 366	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 367        dma_addr_t tx_ring_dma;
 368        dma_addr_t rx_ring_dma;
 369	struct timer_list timer;		/* Media monitoring timer. */
 370	struct net_device *ndev;		/* backpointer */
 371	/* ethtool extra stats */
 372	struct {
 373		u64 tx_multiple_collisions;
 374		u64 tx_single_collisions;
 375		u64 tx_late_collisions;
 376		u64 tx_deferred;
 377		u64 tx_deferred_excessive;
 378		u64 tx_aborted;
 379		u64 tx_bcasts;
 380		u64 rx_bcasts;
 381		u64 tx_mcasts;
 382		u64 rx_mcasts;
 383	} xstats;
 384	/* Frequently used values: keep some adjacent for cache effect. */
 385	spinlock_t lock;
 386	int msg_enable;
 387	int chip_id;
 388	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 389	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 390	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 391	unsigned int cur_tx, dirty_tx;
  392	/* These values keep track of the transceiver/media in use. */
 393	unsigned int flowctrl:1;
 394	unsigned int default_port:4;		/* Last dev->if_port value. */
 395	unsigned int an_enable:1;
 396	unsigned int speed;
 397	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
 398	struct tasklet_struct rx_tasklet;
 399	struct tasklet_struct tx_tasklet;
 400	int budget;
 401	int cur_task;
 402	/* Multicast and receive mode. */
 403	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 404	u16 mcast_filter[4];
 405	/* MII transceiver section. */
 406	struct mii_if_info mii_if;
 407	int mii_preamble_required;
 408	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 409	struct pci_dev *pci_dev;
 410	void __iomem *base;
 411	spinlock_t statlock;
 412};
 413
 414/* The station address location in the EEPROM. */
 415#define EEPROM_SA_OFFSET	0x10
 416#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 417			IntrDrvRqst | IntrTxDone | StatsMax | \
 418			LinkChange)
 419
 420static int  change_mtu(struct net_device *dev, int new_mtu);
 421static int  eeprom_read(void __iomem *ioaddr, int location);
 422static int  mdio_read(struct net_device *dev, int phy_id, int location);
 423static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 424static int  mdio_wait_link(struct net_device *dev, int wait);
 425static int  netdev_open(struct net_device *dev);
 426static void check_duplex(struct net_device *dev);
 427static void netdev_timer(struct timer_list *t);
 428static void tx_timeout(struct net_device *dev, unsigned int txqueue);
 429static void init_ring(struct net_device *dev);
 430static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 431static int reset_tx (struct net_device *dev);
 432static irqreturn_t intr_handler(int irq, void *dev_instance);
 433static void rx_poll(struct tasklet_struct *t);
 434static void tx_poll(struct tasklet_struct *t);
 435static void refill_rx (struct net_device *dev);
 436static void netdev_error(struct net_device *dev, int intr_status);
 437static void netdev_error(struct net_device *dev, int intr_status);
 438static void set_rx_mode(struct net_device *dev);
 439static int __set_mac_addr(struct net_device *dev);
 440static int sundance_set_mac_addr(struct net_device *dev, void *data);
 441static struct net_device_stats *get_stats(struct net_device *dev);
 442static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 443static int  netdev_close(struct net_device *dev);
 444static const struct ethtool_ops ethtool_ops;
 445
 446static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 447{
 448	struct netdev_private *np = netdev_priv(dev);
 449	void __iomem *ioaddr = np->base + ASICCtrl;
 450	int countdown;
 451
  452	/* ST201 documentation states ASICCtrl is a 32-bit register */
 453	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 454	/* ST201 documentation states reset can take up to 1 ms */
 455	countdown = 10 + 1;
 456	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 457		if (--countdown == 0) {
 458			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 459			break;
 460		}
 461		udelay(100);
 462	}
 463}
 464
 465#ifdef CONFIG_NET_POLL_CONTROLLER
 466static void sundance_poll_controller(struct net_device *dev)
 467{
 468	struct netdev_private *np = netdev_priv(dev);
 469
 470	disable_irq(np->pci_dev->irq);
 471	intr_handler(np->pci_dev->irq, dev);
 472	enable_irq(np->pci_dev->irq);
 473}
 474#endif
 475
 476static const struct net_device_ops netdev_ops = {
 477	.ndo_open		= netdev_open,
 478	.ndo_stop		= netdev_close,
 479	.ndo_start_xmit		= start_tx,
 480	.ndo_get_stats 		= get_stats,
 481	.ndo_set_rx_mode	= set_rx_mode,
 482	.ndo_eth_ioctl		= netdev_ioctl,
 483	.ndo_tx_timeout		= tx_timeout,
 484	.ndo_change_mtu		= change_mtu,
 485	.ndo_set_mac_address 	= sundance_set_mac_addr,
 486	.ndo_validate_addr	= eth_validate_addr,
 487#ifdef CONFIG_NET_POLL_CONTROLLER
 488	.ndo_poll_controller 	= sundance_poll_controller,
 489#endif
 490};
 491
 492static int sundance_probe1(struct pci_dev *pdev,
 493			   const struct pci_device_id *ent)
 494{
 495	struct net_device *dev;
 496	struct netdev_private *np;
 497	static int card_idx;
 498	int chip_idx = ent->driver_data;
 499	int irq;
 500	int i;
 501	void __iomem *ioaddr;
 502	u16 mii_ctl;
 503	void *ring_space;
 504	dma_addr_t ring_dma;
 505#ifdef USE_IO_OPS
 506	int bar = 0;
 507#else
 508	int bar = 1;
 509#endif
 510	int phy, phy_end, phy_idx = 0;
 511	__le16 addr[ETH_ALEN / 2];
 512
 513	if (pci_enable_device(pdev))
 514		return -EIO;
 515	pci_set_master(pdev);
 516
 517	irq = pdev->irq;
 518
 519	dev = alloc_etherdev(sizeof(*np));
 520	if (!dev)
 521		return -ENOMEM;
 522	SET_NETDEV_DEV(dev, &pdev->dev);
 523
 524	if (pci_request_regions(pdev, DRV_NAME))
 525		goto err_out_netdev;
 526
 527	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 528	if (!ioaddr)
 529		goto err_out_res;
 530
 531	for (i = 0; i < 3; i++)
 532		addr[i] =
 533			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 534	eth_hw_addr_set(dev, (u8 *)addr);
 535
 536	np = netdev_priv(dev);
 537	np->ndev = dev;
 538	np->base = ioaddr;
 539	np->pci_dev = pdev;
 540	np->chip_id = chip_idx;
 541	np->msg_enable = (1 << debug) - 1;
 542	spin_lock_init(&np->lock);
 543	spin_lock_init(&np->statlock);
 544	tasklet_setup(&np->rx_tasklet, rx_poll);
 545	tasklet_setup(&np->tx_tasklet, tx_poll);
 546
 547	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 548			&ring_dma, GFP_KERNEL);
 549	if (!ring_space)
 550		goto err_out_cleardev;
 551	np->tx_ring = (struct netdev_desc *)ring_space;
 552	np->tx_ring_dma = ring_dma;
 553
 554	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 555			&ring_dma, GFP_KERNEL);
 556	if (!ring_space)
 557		goto err_out_unmap_tx;
 558	np->rx_ring = (struct netdev_desc *)ring_space;
 559	np->rx_ring_dma = ring_dma;
 560
 561	np->mii_if.dev = dev;
 562	np->mii_if.mdio_read = mdio_read;
 563	np->mii_if.mdio_write = mdio_write;
 564	np->mii_if.phy_id_mask = 0x1f;
 565	np->mii_if.reg_num_mask = 0x1f;
 566
 567	/* The chip-specific entries in the device structure. */
 568	dev->netdev_ops = &netdev_ops;
 569	dev->ethtool_ops = &ethtool_ops;
 570	dev->watchdog_timeo = TX_TIMEOUT;
 571
 572	/* MTU range: 68 - 8191 */
 573	dev->min_mtu = ETH_MIN_MTU;
 574	dev->max_mtu = 8191;
 575
 576	pci_set_drvdata(pdev, dev);
 577
 578	i = register_netdev(dev);
 579	if (i)
 580		goto err_out_unmap_rx;
 581
 582	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 583	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 584	       dev->dev_addr, irq);
 585
 586	np->phys[0] = 1;		/* Default setting */
 587	np->mii_preamble_required++;
 588
 589	/*
  590	 * It seems some PHYs don't deal well with address 0 being accessed
 591	 * first
 592	 */
 593	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 594		phy = 0;
 595		phy_end = 31;
 596	} else {
 597		phy = 1;
 598		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 599	}
 600	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 601		int phyx = phy & 0x1f;
 602		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 603		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 604			np->phys[phy_idx++] = phyx;
 605			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 606			if ((mii_status & 0x0040) == 0)
 607				np->mii_preamble_required++;
 608			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 609				   "0x%4.4x advertising %4.4x.\n",
 610				   dev->name, phyx, mii_status, np->mii_if.advertising);
 611		}
 612	}
 613	np->mii_preamble_required--;
 614
 615	if (phy_idx == 0) {
 616		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 617			   dev->name, ioread32(ioaddr + ASICCtrl));
 618		goto err_out_unregister;
 619	}
 620
 621	np->mii_if.phy_id = np->phys[0];
 622
 623	/* Parse override configuration */
 624	np->an_enable = 1;
 625	if (card_idx < MAX_UNITS) {
 626		if (media[card_idx] != NULL) {
 627			np->an_enable = 0;
 628			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 629			    strcmp (media[card_idx], "4") == 0) {
 630				np->speed = 100;
 631				np->mii_if.full_duplex = 1;
 632			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 633				   strcmp (media[card_idx], "3") == 0) {
 634				np->speed = 100;
 635				np->mii_if.full_duplex = 0;
 636			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 637				   strcmp (media[card_idx], "2") == 0) {
 638				np->speed = 10;
 639				np->mii_if.full_duplex = 1;
 640			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 641				   strcmp (media[card_idx], "1") == 0) {
 642				np->speed = 10;
 643				np->mii_if.full_duplex = 0;
 644			} else {
 645				np->an_enable = 1;
 646			}
 647		}
 648		if (flowctrl == 1)
 649			np->flowctrl = 1;
 650	}
 651
 652	/* Fibre PHY? */
 653	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 654		/* Default 100Mbps Full */
 655		if (np->an_enable) {
 656			np->speed = 100;
 657			np->mii_if.full_duplex = 1;
 658			np->an_enable = 0;
 659		}
 660	}
 661	/* Reset PHY */
 662	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 663	mdelay (300);
 664	/* If flow control enabled, we need to advertise it.*/
 665	if (np->flowctrl)
 666		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 667	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 668	/* Force media type */
 669	if (!np->an_enable) {
 670		mii_ctl = 0;
 671		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 672		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 673		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 674		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 675			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 676
 677	}
 678
 679	/* Perhaps move the reset here? */
 680	/* Reset the chip to erase previous misconfiguration. */
 681	if (netif_msg_hw(np))
 682		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 683	sundance_reset(dev, 0x00ff << 16);
 684	if (netif_msg_hw(np))
 685		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 686
 687	card_idx++;
 688	return 0;
 689
 690err_out_unregister:
 691	unregister_netdev(dev);
 692err_out_unmap_rx:
 693	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 694		np->rx_ring, np->rx_ring_dma);
 695err_out_unmap_tx:
 696	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 697		np->tx_ring, np->tx_ring_dma);
 698err_out_cleardev:
 699	pci_iounmap(pdev, ioaddr);
 700err_out_res:
 701	pci_release_regions(pdev);
 702err_out_netdev:
 703	free_netdev (dev);
 704	return -ENODEV;
 705}
 706
 707static int change_mtu(struct net_device *dev, int new_mtu)
 708{
 709	if (netif_running(dev))
 710		return -EBUSY;
 711	dev->mtu = new_mtu;
 712	return 0;
 713}
 714
 715#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 716/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 717static int eeprom_read(void __iomem *ioaddr, int location)
 718{
 719	int boguscnt = 10000;		/* Typical 1900 ticks. */
 720	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 721	do {
 722		eeprom_delay(ioaddr + EECtrl);
 723		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 724			return ioread16(ioaddr + EEData);
 725		}
 726	} while (--boguscnt > 0);
 727	return 0;
 728}
 729
 730/*  MII transceiver control section.
 731	Read and write the MII registers using software-generated serial
 732	MDIO protocol.  See the MII specifications or DP83840A data sheet
 733	for details.
 734
  735	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
  736	met by back-to-back 33 MHz PCI cycles. */
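/* mdio_delay() below is just a dummy register read; the PCI read round-trip
   itself is assumed to provide the MDIO setup/hold time implied above. */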
 737#define mdio_delay() ioread8(mdio_addr)
 738
 739enum mii_reg_bits {
 740	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 741};
 742#define MDIO_EnbIn  (0)
 743#define MDIO_WRITE0 (MDIO_EnbOutput)
 744#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 745
 746/* Generate the preamble required for initial synchronization and
 747   a few older transceivers. */
 748static void mdio_sync(void __iomem *mdio_addr)
 749{
 750	int bits = 32;
 751
 752	/* Establish sync by sending at least 32 logic ones. */
 753	while (--bits >= 0) {
 754		iowrite8(MDIO_WRITE1, mdio_addr);
 755		mdio_delay();
 756		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 757		mdio_delay();
 758	}
 759}
 760
 761static int mdio_read(struct net_device *dev, int phy_id, int location)
 762{
 763	struct netdev_private *np = netdev_priv(dev);
 764	void __iomem *mdio_addr = np->base + MIICtrl;
 765	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 766	int i, retval = 0;
 767
 768	if (np->mii_preamble_required)
 769		mdio_sync(mdio_addr);
 770
 771	/* Shift the read command bits out. */
 772	for (i = 15; i >= 0; i--) {
 773		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 774
 775		iowrite8(dataval, mdio_addr);
 776		mdio_delay();
 777		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 778		mdio_delay();
 779	}
 780	/* Read the two transition, 16 data, and wire-idle bits. */
 781	for (i = 19; i > 0; i--) {
 782		iowrite8(MDIO_EnbIn, mdio_addr);
 783		mdio_delay();
 784		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 785		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 786		mdio_delay();
 787	}
 788	return (retval>>1) & 0xffff;
 789}
 790
 791static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 792{
 793	struct netdev_private *np = netdev_priv(dev);
 794	void __iomem *mdio_addr = np->base + MIICtrl;
 795	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 796	int i;
 797
 798	if (np->mii_preamble_required)
 799		mdio_sync(mdio_addr);
 800
 801	/* Shift the command bits out. */
 802	for (i = 31; i >= 0; i--) {
 803		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 804
 805		iowrite8(dataval, mdio_addr);
 806		mdio_delay();
 807		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 808		mdio_delay();
 809	}
 810	/* Clear out extra bits. */
 811	for (i = 2; i > 0; i--) {
 812		iowrite8(MDIO_EnbIn, mdio_addr);
 813		mdio_delay();
 814		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 815		mdio_delay();
 816	}
 817}
 818
 819static int mdio_wait_link(struct net_device *dev, int wait)
 820{
 821	int bmsr;
 822	int phy_id;
 823	struct netdev_private *np;
 824
 825	np = netdev_priv(dev);
 826	phy_id = np->phys[0];
 827
 828	do {
 829		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 830		if (bmsr & 0x0004)
 831			return 0;
 832		mdelay(1);
 833	} while (--wait > 0);
 834	return -1;
 835}
 836
 837static int netdev_open(struct net_device *dev)
 838{
 839	struct netdev_private *np = netdev_priv(dev);
 840	void __iomem *ioaddr = np->base;
 841	const int irq = np->pci_dev->irq;
 842	unsigned long flags;
 843	int i;
 844
 845	sundance_reset(dev, 0x00ff << 16);
 846
 847	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 848	if (i)
 849		return i;
 850
 851	if (netif_msg_ifup(np))
 852		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 853
 854	init_ring(dev);
 855
 856	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 857	/* The Tx list pointer is written as packets are queued. */
 858
 859	/* Initialize other registers. */
 860	__set_mac_addr(dev);
 861#if IS_ENABLED(CONFIG_VLAN_8021Q)
 862	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 863#else
 864	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 865#endif
 866	if (dev->mtu > 2047)
 867		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 868
 869	/* Configure the PCI bus bursts and FIFO thresholds. */
 870
 871	if (dev->if_port == 0)
 872		dev->if_port = np->default_port;
 873
 874	spin_lock_init(&np->mcastlock);
 875
 876	set_rx_mode(dev);
 877	iowrite16(0, ioaddr + IntrEnable);
 878	iowrite16(0, ioaddr + DownCounter);
 879	/* Set the chip to poll every N*320nsec. */
 880	iowrite8(100, ioaddr + RxDMAPollPeriod);
 881	iowrite8(127, ioaddr + TxDMAPollPeriod);
 882	/* Fix DFE-580TX packet drop issue */
 883	if (np->pci_dev->revision >= 0x14)
 884		iowrite8(0x01, ioaddr + DebugCtrl1);
 885	netif_start_queue(dev);
 886
 887	spin_lock_irqsave(&np->lock, flags);
 888	reset_tx(dev);
 889	spin_unlock_irqrestore(&np->lock, flags);
 890
 891	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 892
 893	/* Disable Wol */
 894	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
 895	np->wol_enabled = 0;
 896
 897	if (netif_msg_ifup(np))
 898		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 899			   "MAC Control %x, %4.4x %4.4x.\n",
 900			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 901			   ioread32(ioaddr + MACCtrl0),
 902			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 903
 904	/* Set the timer to check for link beat. */
 905	timer_setup(&np->timer, netdev_timer, 0);
 906	np->timer.expires = jiffies + 3*HZ;
 907	add_timer(&np->timer);
 908
 909	/* Enable interrupts by setting the interrupt mask. */
 910	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 911
 912	return 0;
 913}
 914
 915static void check_duplex(struct net_device *dev)
 916{
 917	struct netdev_private *np = netdev_priv(dev);
 918	void __iomem *ioaddr = np->base;
 919	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 920	int negotiated = mii_lpa & np->mii_if.advertising;
 921	int duplex;
 922
 923	/* Force media */
 924	if (!np->an_enable || mii_lpa == 0xffff) {
 925		if (np->mii_if.full_duplex)
 926			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 927				ioaddr + MACCtrl0);
 928		return;
 929	}
 930
 931	/* Autonegotiation */
 932	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 933	if (np->mii_if.full_duplex != duplex) {
 934		np->mii_if.full_duplex = duplex;
 935		if (netif_msg_link(np))
 936			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 937				   "negotiated capability %4.4x.\n", dev->name,
 938				   duplex ? "full" : "half", np->phys[0], negotiated);
 939		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 940	}
 941}
 942
 943static void netdev_timer(struct timer_list *t)
 944{
 945	struct netdev_private *np = from_timer(np, t, timer);
 946	struct net_device *dev = np->mii_if.dev;
 947	void __iomem *ioaddr = np->base;
 948	int next_tick = 10*HZ;
 949
 950	if (netif_msg_timer(np)) {
 951		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 952			   "Tx %x Rx %x.\n",
 953			   dev->name, ioread16(ioaddr + IntrEnable),
 954			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 955	}
 956	check_duplex(dev);
 957	np->timer.expires = jiffies + next_tick;
 958	add_timer(&np->timer);
 959}
 960
 961static void tx_timeout(struct net_device *dev, unsigned int txqueue)
 962{
 963	struct netdev_private *np = netdev_priv(dev);
 964	void __iomem *ioaddr = np->base;
 965	unsigned long flag;
 966
 967	netif_stop_queue(dev);
 968	tasklet_disable_in_atomic(&np->tx_tasklet);
 969	iowrite16(0, ioaddr + IntrEnable);
 970	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 971		   "TxFrameId %2.2x,"
 972		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 973		   ioread8(ioaddr + TxFrameId));
 974
 975	{
 976		int i;
 977		for (i=0; i<TX_RING_SIZE; i++) {
 978			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 979				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 980				le32_to_cpu(np->tx_ring[i].next_desc),
 981				le32_to_cpu(np->tx_ring[i].status),
 982				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 983				le32_to_cpu(np->tx_ring[i].frag.addr),
 984				le32_to_cpu(np->tx_ring[i].frag.length));
 985		}
 986		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 987			ioread32(np->base + TxListPtr),
 988			netif_queue_stopped(dev));
 989		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 990			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 991			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 992		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
 993		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
 994	}
 995	spin_lock_irqsave(&np->lock, flag);
 996
  997	/* Stop and restart the chip's Tx processes. */
 998	reset_tx(dev);
 999	spin_unlock_irqrestore(&np->lock, flag);
1000
1001	dev->if_port = 0;
1002
1003	netif_trans_update(dev); /* prevent tx timeout */
1004	dev->stats.tx_errors++;
1005	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1006		netif_wake_queue(dev);
1007	}
1008	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1009	tasklet_enable(&np->tx_tasklet);
1010}
1011
1012
1013/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1014static void init_ring(struct net_device *dev)
1015{
1016	struct netdev_private *np = netdev_priv(dev);
1017	int i;
1018
1019	np->cur_rx = np->cur_tx = 0;
1020	np->dirty_rx = np->dirty_tx = 0;
1021	np->cur_task = 0;
1022
1023	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1024
1025	/* Initialize all Rx descriptors. */
1026	for (i = 0; i < RX_RING_SIZE; i++) {
1027		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1028			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1029		np->rx_ring[i].status = 0;
1030		np->rx_ring[i].frag.length = 0;
1031		np->rx_skbuff[i] = NULL;
1032	}
1033
1034	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1035	for (i = 0; i < RX_RING_SIZE; i++) {
1036		struct sk_buff *skb =
1037			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1038		np->rx_skbuff[i] = skb;
1039		if (skb == NULL)
1040			break;
1041		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1042		np->rx_ring[i].frag.addr = cpu_to_le32(
1043			dma_map_single(&np->pci_dev->dev, skb->data,
1044				np->rx_buf_sz, DMA_FROM_DEVICE));
1045		if (dma_mapping_error(&np->pci_dev->dev,
1046					np->rx_ring[i].frag.addr)) {
1047			dev_kfree_skb(skb);
1048			np->rx_skbuff[i] = NULL;
1049			break;
1050		}
1051		np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1052	}
1053	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1054
1055	for (i = 0; i < TX_RING_SIZE; i++) {
1056		np->tx_skbuff[i] = NULL;
1057		np->tx_ring[i].status = 0;
1058	}
1059}
1060
1061static void tx_poll(struct tasklet_struct *t)
1062{
1063	struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
1064	unsigned head = np->cur_task % TX_RING_SIZE;
1065	struct netdev_desc *txdesc =
1066		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1067
1068	/* Chain the next pointer */
1069	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1070		int entry = np->cur_task % TX_RING_SIZE;
1071		txdesc = &np->tx_ring[entry];
1072		if (np->last_tx) {
1073			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1074				entry*sizeof(struct netdev_desc));
1075		}
1076		np->last_tx = txdesc;
1077	}
1078	/* Indicate the latest descriptor of tx ring */
1079	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1080
1081	if (ioread32 (np->base + TxListPtr) == 0)
1082		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1083			np->base + TxListPtr);
1084}
1085
1086static netdev_tx_t
1087start_tx (struct sk_buff *skb, struct net_device *dev)
1088{
1089	struct netdev_private *np = netdev_priv(dev);
1090	struct netdev_desc *txdesc;
1091	unsigned entry;
1092
1093	/* Calculate the next Tx descriptor entry. */
1094	entry = np->cur_tx % TX_RING_SIZE;
1095	np->tx_skbuff[entry] = skb;
1096	txdesc = &np->tx_ring[entry];
1097
1098	txdesc->next_desc = 0;
1099	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1100	txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1101				skb->data, skb->len, DMA_TO_DEVICE));
1102	if (dma_mapping_error(&np->pci_dev->dev,
1103				txdesc->frag.addr))
1104			goto drop_frame;
1105	txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
1106
1107	/* Increment cur_tx before tasklet_schedule() */
1108	np->cur_tx++;
1109	mb();
1110	/* Schedule a tx_poll() task */
1111	tasklet_schedule(&np->tx_tasklet);
1112
1113	/* On some architectures: explicitly flush cache lines here. */
1114	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1115	    !netif_queue_stopped(dev)) {
1116		/* do nothing */
1117	} else {
1118		netif_stop_queue (dev);
1119	}
1120	if (netif_msg_tx_queued(np)) {
1121		printk (KERN_DEBUG
1122			"%s: Transmit frame #%d queued in slot %d.\n",
1123			dev->name, np->cur_tx, entry);
1124	}
1125	return NETDEV_TX_OK;
1126
1127drop_frame:
1128	dev_kfree_skb_any(skb);
1129	np->tx_skbuff[entry] = NULL;
1130	dev->stats.tx_dropped++;
1131	return NETDEV_TX_OK;
1132}
1133
 1134/* Reset the hardware Tx path and free all Tx buffers */
1135static int
1136reset_tx (struct net_device *dev)
1137{
1138	struct netdev_private *np = netdev_priv(dev);
1139	void __iomem *ioaddr = np->base;
1140	struct sk_buff *skb;
1141	int i;
1142
1143	/* Reset tx logic, TxListPtr will be cleaned */
1144	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1145	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1146
1147	/* free all tx skbuff */
1148	for (i = 0; i < TX_RING_SIZE; i++) {
1149		np->tx_ring[i].next_desc = 0;
1150
1151		skb = np->tx_skbuff[i];
1152		if (skb) {
1153			dma_unmap_single(&np->pci_dev->dev,
1154				le32_to_cpu(np->tx_ring[i].frag.addr),
1155				skb->len, DMA_TO_DEVICE);
1156			dev_kfree_skb_any(skb);
1157			np->tx_skbuff[i] = NULL;
1158			dev->stats.tx_dropped++;
1159		}
1160	}
1161	np->cur_tx = np->dirty_tx = 0;
1162	np->cur_task = 0;
1163
1164	np->last_tx = NULL;
1165	iowrite8(127, ioaddr + TxDMAPollPeriod);
1166
1167	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1168	return 0;
1169}
1170
1171/* The interrupt handler cleans up after the Tx thread,
 1172   and schedules Rx work via the rx_tasklet */
1173static irqreturn_t intr_handler(int irq, void *dev_instance)
1174{
1175	struct net_device *dev = (struct net_device *)dev_instance;
1176	struct netdev_private *np = netdev_priv(dev);
1177	void __iomem *ioaddr = np->base;
1178	int hw_frame_id;
1179	int tx_cnt;
1180	int tx_status;
1181	int handled = 0;
1182	int i;
1183
1184	do {
1185		int intr_status = ioread16(ioaddr + IntrStatus);
1186		iowrite16(intr_status, ioaddr + IntrStatus);
1187
1188		if (netif_msg_intr(np))
1189			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1190				   dev->name, intr_status);
1191
1192		if (!(intr_status & DEFAULT_INTR))
1193			break;
1194
1195		handled = 1;
1196
1197		if (intr_status & (IntrRxDMADone)) {
1198			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1199					ioaddr + IntrEnable);
1200			if (np->budget < 0)
1201				np->budget = RX_BUDGET;
1202			tasklet_schedule(&np->rx_tasklet);
1203		}
1204		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1205			tx_status = ioread16 (ioaddr + TxStatus);
1206			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1207				if (netif_msg_tx_done(np))
1208					printk
1209					    ("%s: Transmit status is %2.2x.\n",
1210				     	dev->name, tx_status);
1211				if (tx_status & 0x1e) {
1212					if (netif_msg_tx_err(np))
1213						printk("%s: Transmit error status %4.4x.\n",
1214							   dev->name, tx_status);
1215					dev->stats.tx_errors++;
1216					if (tx_status & 0x10)
1217						dev->stats.tx_fifo_errors++;
1218					if (tx_status & 0x08)
1219						dev->stats.collisions++;
1220					if (tx_status & 0x04)
1221						dev->stats.tx_fifo_errors++;
1222					if (tx_status & 0x02)
1223						dev->stats.tx_window_errors++;
1224
1225					/*
1226					** This reset has been verified on
1227					** DFE-580TX boards ! phdm@macqel.be.
1228					*/
1229					if (tx_status & 0x10) {	/* TxUnderrun */
1230						/* Restart Tx FIFO and transmitter */
1231						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1232						/* No need to reset the Tx pointer here */
1233					}
1234					/* Restart the Tx. Need to make sure tx enabled */
1235					i = 10;
1236					do {
1237						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1238						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1239							break;
1240						mdelay(1);
1241					} while (--i);
1242				}
1243				/* Yup, this is a documentation bug.  It cost me *hours*. */
1244				iowrite16 (0, ioaddr + TxStatus);
1245				if (tx_cnt < 0) {
1246					iowrite32(5000, ioaddr + DownCounter);
1247					break;
1248				}
1249				tx_status = ioread16 (ioaddr + TxStatus);
1250			}
1251			hw_frame_id = (tx_status >> 8) & 0xff;
1252		} else 	{
1253			hw_frame_id = ioread8(ioaddr + TxFrameId);
1254		}
1255
1256		if (np->pci_dev->revision >= 0x14) {
1257			spin_lock(&np->lock);
1258			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1259				int entry = np->dirty_tx % TX_RING_SIZE;
1260				struct sk_buff *skb;
1261				int sw_frame_id;
1262				sw_frame_id = (le32_to_cpu(
1263					np->tx_ring[entry].status) >> 2) & 0xff;
1264				if (sw_frame_id == hw_frame_id &&
1265					!(le32_to_cpu(np->tx_ring[entry].status)
1266					& 0x00010000))
1267						break;
1268				if (sw_frame_id == (hw_frame_id + 1) %
1269					TX_RING_SIZE)
1270						break;
1271				skb = np->tx_skbuff[entry];
1272				/* Free the original skb. */
1273				dma_unmap_single(&np->pci_dev->dev,
1274					le32_to_cpu(np->tx_ring[entry].frag.addr),
1275					skb->len, DMA_TO_DEVICE);
1276				dev_consume_skb_irq(np->tx_skbuff[entry]);
1277				np->tx_skbuff[entry] = NULL;
1278				np->tx_ring[entry].frag.addr = 0;
1279				np->tx_ring[entry].frag.length = 0;
1280			}
1281			spin_unlock(&np->lock);
1282		} else {
1283			spin_lock(&np->lock);
1284			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1285				int entry = np->dirty_tx % TX_RING_SIZE;
1286				struct sk_buff *skb;
1287				if (!(le32_to_cpu(np->tx_ring[entry].status)
1288							& 0x00010000))
1289					break;
1290				skb = np->tx_skbuff[entry];
1291				/* Free the original skb. */
1292				dma_unmap_single(&np->pci_dev->dev,
1293					le32_to_cpu(np->tx_ring[entry].frag.addr),
1294					skb->len, DMA_TO_DEVICE);
1295				dev_consume_skb_irq(np->tx_skbuff[entry]);
1296				np->tx_skbuff[entry] = NULL;
1297				np->tx_ring[entry].frag.addr = 0;
1298				np->tx_ring[entry].frag.length = 0;
1299			}
1300			spin_unlock(&np->lock);
1301		}
1302
1303		if (netif_queue_stopped(dev) &&
1304			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1305			/* The ring is no longer full, clear busy flag. */
1306			netif_wake_queue (dev);
1307		}
1308		/* Abnormal error summary/uncommon events handlers. */
1309		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1310			netdev_error(dev, intr_status);
1311	} while (0);
1312	if (netif_msg_intr(np))
1313		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1314			   dev->name, ioread16(ioaddr + IntrStatus));
1315	return IRQ_RETVAL(handled);
1316}
1317
1318static void rx_poll(struct tasklet_struct *t)
1319{
1320	struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
1321	struct net_device *dev = np->ndev;
1322	int entry = np->cur_rx % RX_RING_SIZE;
1323	int boguscnt = np->budget;
1324	void __iomem *ioaddr = np->base;
1325	int received = 0;
1326
1327	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1328	while (1) {
1329		struct netdev_desc *desc = &(np->rx_ring[entry]);
1330		u32 frame_status = le32_to_cpu(desc->status);
1331		int pkt_len;
1332
1333		if (--boguscnt < 0) {
1334			goto not_done;
1335		}
1336		if (!(frame_status & DescOwn))
1337			break;
1338		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1339		if (netif_msg_rx_status(np))
1340			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1341				   frame_status);
1342		if (frame_status & 0x001f4000) {
1343			/* There was a error. */
1344			if (netif_msg_rx_err(np))
1345				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1346					   frame_status);
1347			dev->stats.rx_errors++;
1348			if (frame_status & 0x00100000)
1349				dev->stats.rx_length_errors++;
1350			if (frame_status & 0x00010000)
1351				dev->stats.rx_fifo_errors++;
1352			if (frame_status & 0x00060000)
1353				dev->stats.rx_frame_errors++;
1354			if (frame_status & 0x00080000)
1355				dev->stats.rx_crc_errors++;
1356			if (frame_status & 0x00100000) {
1357				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1358					   " status %8.8x.\n",
1359					   dev->name, frame_status);
1360			}
1361		} else {
1362			struct sk_buff *skb;
1363#ifndef final_version
1364			if (netif_msg_rx_status(np))
1365				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1366					   ", bogus_cnt %d.\n",
1367					   pkt_len, boguscnt);
1368#endif
1369			/* Check if the packet is long enough to accept without copying
1370			   to a minimally-sized skbuff. */
1371			if (pkt_len < rx_copybreak &&
1372			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1373				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1374				dma_sync_single_for_cpu(&np->pci_dev->dev,
1375						le32_to_cpu(desc->frag.addr),
1376						np->rx_buf_sz, DMA_FROM_DEVICE);
1377				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1378				dma_sync_single_for_device(&np->pci_dev->dev,
1379						le32_to_cpu(desc->frag.addr),
1380						np->rx_buf_sz, DMA_FROM_DEVICE);
1381				skb_put(skb, pkt_len);
1382			} else {
1383				dma_unmap_single(&np->pci_dev->dev,
1384					le32_to_cpu(desc->frag.addr),
1385					np->rx_buf_sz, DMA_FROM_DEVICE);
1386				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1387				np->rx_skbuff[entry] = NULL;
1388			}
1389			skb->protocol = eth_type_trans(skb, dev);
1390			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1391			netif_rx(skb);
1392		}
1393		entry = (entry + 1) % RX_RING_SIZE;
1394		received++;
1395	}
1396	np->cur_rx = entry;
1397	refill_rx (dev);
1398	np->budget -= received;
1399	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1400	return;
1401
1402not_done:
1403	np->cur_rx = entry;
1404	refill_rx (dev);
1405	if (!received)
1406		received = 1;
1407	np->budget -= received;
1408	if (np->budget <= 0)
1409		np->budget = RX_BUDGET;
1410	tasklet_schedule(&np->rx_tasklet);
1411}
1412
1413static void refill_rx (struct net_device *dev)
1414{
1415	struct netdev_private *np = netdev_priv(dev);
1416	int entry;
1417
1418	/* Refill the Rx ring buffers. */
1419	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1420		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1421		struct sk_buff *skb;
1422		entry = np->dirty_rx % RX_RING_SIZE;
1423		if (np->rx_skbuff[entry] == NULL) {
1424			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1425			np->rx_skbuff[entry] = skb;
1426			if (skb == NULL)
1427				break;		/* Better luck next round. */
1428			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1429			np->rx_ring[entry].frag.addr = cpu_to_le32(
1430				dma_map_single(&np->pci_dev->dev, skb->data,
1431					np->rx_buf_sz, DMA_FROM_DEVICE));
1432			if (dma_mapping_error(&np->pci_dev->dev,
1433				    np->rx_ring[entry].frag.addr)) {
1434			    dev_kfree_skb_irq(skb);
1435			    np->rx_skbuff[entry] = NULL;
1436			    break;
1437			}
1438		}
1439		/* Perhaps we need not reset this field. */
1440		np->rx_ring[entry].frag.length =
1441			cpu_to_le32(np->rx_buf_sz | LastFrag);
1442		np->rx_ring[entry].status = 0;
1443	}
1444}
1445static void netdev_error(struct net_device *dev, int intr_status)
1446{
1447	struct netdev_private *np = netdev_priv(dev);
1448	void __iomem *ioaddr = np->base;
1449	u16 mii_ctl, mii_advertise, mii_lpa;
1450	int speed;
1451
1452	if (intr_status & LinkChange) {
1453		if (mdio_wait_link(dev, 10) == 0) {
1454			printk(KERN_INFO "%s: Link up\n", dev->name);
1455			if (np->an_enable) {
1456				mii_advertise = mdio_read(dev, np->phys[0],
1457							   MII_ADVERTISE);
1458				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1459				mii_advertise &= mii_lpa;
1460				printk(KERN_INFO "%s: Link changed: ",
1461					dev->name);
1462				if (mii_advertise & ADVERTISE_100FULL) {
1463					np->speed = 100;
1464					printk("100Mbps, full duplex\n");
1465				} else if (mii_advertise & ADVERTISE_100HALF) {
1466					np->speed = 100;
1467					printk("100Mbps, half duplex\n");
1468				} else if (mii_advertise & ADVERTISE_10FULL) {
1469					np->speed = 10;
1470					printk("10Mbps, full duplex\n");
1471				} else if (mii_advertise & ADVERTISE_10HALF) {
1472					np->speed = 10;
1473					printk("10Mbps, half duplex\n");
1474				} else
1475					printk("\n");
1476
1477			} else {
1478				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1479				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1480				np->speed = speed;
1481				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1482					dev->name, speed);
1483				printk("%s duplex.\n",
1484					(mii_ctl & BMCR_FULLDPLX) ?
1485						"full" : "half");
1486			}
1487			check_duplex(dev);
1488			if (np->flowctrl && np->mii_if.full_duplex) {
1489				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1490					ioaddr + MulticastFilter1+2);
1491				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1492					ioaddr + MACCtrl0);
1493			}
1494			netif_carrier_on(dev);
1495		} else {
1496			printk(KERN_INFO "%s: Link down\n", dev->name);
1497			netif_carrier_off(dev);
1498		}
1499	}
1500	if (intr_status & StatsMax) {
1501		get_stats(dev);
1502	}
1503	if (intr_status & IntrPCIErr) {
1504		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1505			   dev->name, intr_status);
1506		/* We must do a global reset of DMA to continue. */
1507	}
1508}
1509
1510static struct net_device_stats *get_stats(struct net_device *dev)
1511{
1512	struct netdev_private *np = netdev_priv(dev);
1513	void __iomem *ioaddr = np->base;
1514	unsigned long flags;
1515	u8 late_coll, single_coll, mult_coll;
1516
1517	spin_lock_irqsave(&np->statlock, flags);
 1518	/* The chip only needs to report frames it silently dropped. */
1519	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1520	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1521	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1522	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1523
1524	mult_coll = ioread8(ioaddr + StatsMultiColl);
1525	np->xstats.tx_multiple_collisions += mult_coll;
1526	single_coll = ioread8(ioaddr + StatsOneColl);
1527	np->xstats.tx_single_collisions += single_coll;
1528	late_coll = ioread8(ioaddr + StatsLateColl);
1529	np->xstats.tx_late_collisions += late_coll;
1530	dev->stats.collisions += mult_coll
1531		+ single_coll
1532		+ late_coll;
1533
1534	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1535	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1536	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1537	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1538	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1539	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1540	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1541
1542	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1543	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1544	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1545	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1546
1547	spin_unlock_irqrestore(&np->statlock, flags);
1548
1549	return &dev->stats;
1550}
1551
1552static void set_rx_mode(struct net_device *dev)
1553{
1554	struct netdev_private *np = netdev_priv(dev);
1555	void __iomem *ioaddr = np->base;
1556	u16 mc_filter[4];			/* Multicast hash filter */
1557	u32 rx_mode;
1558	int i;
1559
1560	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1561		memset(mc_filter, 0xff, sizeof(mc_filter));
1562		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1563	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1564		   (dev->flags & IFF_ALLMULTI)) {
1565		/* Too many to match, or accept all multicasts. */
1566		memset(mc_filter, 0xff, sizeof(mc_filter));
1567		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1568	} else if (!netdev_mc_empty(dev)) {
1569		struct netdev_hw_addr *ha;
1570		int bit;
1571		int index;
1572		int crc;
1573		memset (mc_filter, 0, sizeof (mc_filter));
1574		netdev_for_each_mc_addr(ha, dev) {
1575			crc = ether_crc_le(ETH_ALEN, ha->addr);
1576			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1577				if (crc & 0x80000000) index |= 1 << bit;
1578			mc_filter[index/16] |= (1 << (index % 16));
1579		}
1580		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1581	} else {
1582		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1583		return;
1584	}
1585	if (np->mii_if.full_duplex && np->flowctrl)
1586		mc_filter[3] |= 0x0200;
1587
1588	for (i = 0; i < 4; i++)
1589		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1590	iowrite8(rx_mode, ioaddr + RxMode);
1591}
1592
1593static int __set_mac_addr(struct net_device *dev)
1594{
1595	struct netdev_private *np = netdev_priv(dev);
1596	u16 addr16;
1597
1598	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1599	iowrite16(addr16, np->base + StationAddr);
1600	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1601	iowrite16(addr16, np->base + StationAddr+2);
1602	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1603	iowrite16(addr16, np->base + StationAddr+4);
1604	return 0;
1605}
1606
1607/* Invoked with rtnl_lock held */
1608static int sundance_set_mac_addr(struct net_device *dev, void *data)
1609{
1610	const struct sockaddr *addr = data;
1611
1612	if (!is_valid_ether_addr(addr->sa_data))
1613		return -EADDRNOTAVAIL;
1614	eth_hw_addr_set(dev, addr->sa_data);
1615	__set_mac_addr(dev);
1616
1617	return 0;
1618}
1619
1620static const struct {
1621	const char name[ETH_GSTRING_LEN];
1622} sundance_stats[] = {
1623	{ "tx_multiple_collisions" },
1624	{ "tx_single_collisions" },
1625	{ "tx_late_collisions" },
1626	{ "tx_deferred" },
1627	{ "tx_deferred_excessive" },
1628	{ "tx_aborted" },
1629	{ "tx_bcasts" },
1630	{ "rx_bcasts" },
1631	{ "tx_mcasts" },
1632	{ "rx_mcasts" },
1633};
1634
1635static int check_if_running(struct net_device *dev)
1636{
1637	if (!netif_running(dev))
1638		return -EINVAL;
1639	return 0;
1640}
1641
1642static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1643{
1644	struct netdev_private *np = netdev_priv(dev);
1645	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1646	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1647}
1648
1649static int get_link_ksettings(struct net_device *dev,
1650			      struct ethtool_link_ksettings *cmd)
1651{
1652	struct netdev_private *np = netdev_priv(dev);
1653	spin_lock_irq(&np->lock);
1654	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1655	spin_unlock_irq(&np->lock);
1656	return 0;
1657}
1658
1659static int set_link_ksettings(struct net_device *dev,
1660			      const struct ethtool_link_ksettings *cmd)
1661{
1662	struct netdev_private *np = netdev_priv(dev);
1663	int res;
1664	spin_lock_irq(&np->lock);
1665	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1666	spin_unlock_irq(&np->lock);
1667	return res;
1668}
1669
1670static int nway_reset(struct net_device *dev)
1671{
1672	struct netdev_private *np = netdev_priv(dev);
1673	return mii_nway_restart(&np->mii_if);
1674}
1675
1676static u32 get_link(struct net_device *dev)
1677{
1678	struct netdev_private *np = netdev_priv(dev);
1679	return mii_link_ok(&np->mii_if);
1680}
1681
1682static u32 get_msglevel(struct net_device *dev)
1683{
1684	struct netdev_private *np = netdev_priv(dev);
1685	return np->msg_enable;
1686}
1687
1688static void set_msglevel(struct net_device *dev, u32 val)
1689{
1690	struct netdev_private *np = netdev_priv(dev);
1691	np->msg_enable = val;
1692}
1693
1694static void get_strings(struct net_device *dev, u32 stringset,
1695		u8 *data)
1696{
1697	if (stringset == ETH_SS_STATS)
1698		memcpy(data, sundance_stats, sizeof(sundance_stats));
1699}
1700
1701static int get_sset_count(struct net_device *dev, int sset)
1702{
1703	switch (sset) {
1704	case ETH_SS_STATS:
1705		return ARRAY_SIZE(sundance_stats);
1706	default:
1707		return -EOPNOTSUPP;
1708	}
1709}
1710
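/* Note: the values below are written in the same order as the strings in
 * sundance_stats[], so the two tables must be kept in sync. */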
1711static void get_ethtool_stats(struct net_device *dev,
1712		struct ethtool_stats *stats, u64 *data)
1713{
1714	struct netdev_private *np = netdev_priv(dev);
1715	int i = 0;
1716
1717	get_stats(dev);
1718	data[i++] = np->xstats.tx_multiple_collisions;
1719	data[i++] = np->xstats.tx_single_collisions;
1720	data[i++] = np->xstats.tx_late_collisions;
1721	data[i++] = np->xstats.tx_deferred;
1722	data[i++] = np->xstats.tx_deferred_excessive;
1723	data[i++] = np->xstats.tx_aborted;
1724	data[i++] = np->xstats.tx_bcasts;
1725	data[i++] = np->xstats.rx_bcasts;
1726	data[i++] = np->xstats.tx_mcasts;
1727	data[i++] = np->xstats.rx_mcasts;
1728}
1729
1730#ifdef CONFIG_PM
1731
1732static void sundance_get_wol(struct net_device *dev,
1733		struct ethtool_wolinfo *wol)
1734{
1735	struct netdev_private *np = netdev_priv(dev);
1736	void __iomem *ioaddr = np->base;
1737	u8 wol_bits;
1738
1739	wol->wolopts = 0;
1740
1741	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1742	if (!np->wol_enabled)
1743		return;
1744
1745	wol_bits = ioread8(ioaddr + WakeEvent);
1746	if (wol_bits & MagicPktEnable)
1747		wol->wolopts |= WAKE_MAGIC;
1748	if (wol_bits & LinkEventEnable)
1749		wol->wolopts |= WAKE_PHY;
1750}
1751
1752static int sundance_set_wol(struct net_device *dev,
1753	struct ethtool_wolinfo *wol)
1754{
1755	struct netdev_private *np = netdev_priv(dev);
1756	void __iomem *ioaddr = np->base;
1757	u8 wol_bits;
1758
1759	if (!device_can_wakeup(&np->pci_dev->dev))
1760		return -EOPNOTSUPP;
1761
1762	np->wol_enabled = !!(wol->wolopts);
1763	wol_bits = ioread8(ioaddr + WakeEvent);
1764	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1765			LinkEventEnable | WolEnable);
1766
1767	if (np->wol_enabled) {
1768		if (wol->wolopts & WAKE_MAGIC)
1769			wol_bits |= (MagicPktEnable | WolEnable);
1770		if (wol->wolopts & WAKE_PHY)
1771			wol_bits |= (LinkEventEnable | WolEnable);
1772	}
1773	iowrite8(wol_bits, ioaddr + WakeEvent);
1774
1775	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1776
1777	return 0;
1778}
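/* From user space this maps onto ethtool; e.g. "ethtool -s ethX wol g"
 * requests wake on magic packet (WAKE_MAGIC) and "ethtool -s ethX wol d"
 * clears all wake sources (ethX being whatever name the interface has). */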
1779#else
1780#define sundance_get_wol NULL
1781#define sundance_set_wol NULL
1782#endif /* CONFIG_PM */
1783
1784static const struct ethtool_ops ethtool_ops = {
1785	.begin = check_if_running,
1786	.get_drvinfo = get_drvinfo,
1787	.nway_reset = nway_reset,
1788	.get_link = get_link,
1789	.get_wol = sundance_get_wol,
1790	.set_wol = sundance_set_wol,
1791	.get_msglevel = get_msglevel,
1792	.set_msglevel = set_msglevel,
1793	.get_strings = get_strings,
1794	.get_sset_count = get_sset_count,
1795	.get_ethtool_stats = get_ethtool_stats,
1796	.get_link_ksettings = get_link_ksettings,
1797	.set_link_ksettings = set_link_ksettings,
1798};
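/* The .begin hook (check_if_running) makes the ethtool core reject
 * operations with -EINVAL while the interface is down. */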
1799
1800static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1801{
1802	struct netdev_private *np = netdev_priv(dev);
1803	int rc;
1804
1805	if (!netif_running(dev))
1806		return -EINVAL;
1807
1808	spin_lock_irq(&np->lock);
1809	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1810	spin_unlock_irq(&np->lock);
1811
1812	return rc;
1813}
1814
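/* Orderly shutdown: kill the Rx/Tx tasklets, stop the queue, mask
 * interrupts, halt the DMA engines and MAC, issue a global reset, then
 * free the IRQ, the media timer and all ring buffers. */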
1815static int netdev_close(struct net_device *dev)
1816{
1817	struct netdev_private *np = netdev_priv(dev);
1818	void __iomem *ioaddr = np->base;
1819	struct sk_buff *skb;
1820	int i;
1821
1822	/* Wait for and kill the Rx/Tx tasklets */
1823	tasklet_kill(&np->rx_tasklet);
1824	tasklet_kill(&np->tx_tasklet);
1825	np->cur_tx = 0;
1826	np->dirty_tx = 0;
1827	np->cur_task = 0;
1828	np->last_tx = NULL;
1829
1830	netif_stop_queue(dev);
1831
1832	if (netif_msg_ifdown(np)) {
1833		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1834			   "Rx %4.4x Int %2.2x.\n",
1835			   dev->name, ioread8(ioaddr + TxStatus),
1836			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1837		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1838			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1839	}
1840
1841	/* Disable interrupts by clearing the interrupt mask. */
1842	iowrite16(0x0000, ioaddr + IntrEnable);
1843
1844	/* Disable Rx and Tx DMA so the resources can be released safely */
1845	iowrite32(0x500, ioaddr + DMACtrl);
1846
1847	/* Stop the chip's Tx and Rx processes. */
1848	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1849
1850	for (i = 2000; i > 0; i--) {
1851		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1852			break;
1853		mdelay(1);
1854	}
1855
1856	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1857			ioaddr + ASIC_HI_WORD(ASICCtrl));
1858
1859	for (i = 2000; i > 0; i--) {
1860		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1861			break;
1862		mdelay(1);
1863	}
1864
1865#ifdef __i386__
1866	if (netif_msg_hw(np)) {
1867		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1868			   (int)(np->tx_ring_dma));
1869		for (i = 0; i < TX_RING_SIZE; i++)
1870			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1871				   i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
1872				   np->tx_ring[i].frag.length);
1873		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1874			   (int)(np->rx_ring_dma));
1875		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1876			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1877				   i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
1878				   np->rx_ring[i].frag.length);
1879		}
1880	}
1881#endif /* __i386__ debugging only */
1882
1883	free_irq(np->pci_dev->irq, dev);
1884
1885	del_timer_sync(&np->timer);
1886
1887	/* Free all the skbuffs in the Rx queue. */
1888	for (i = 0; i < RX_RING_SIZE; i++) {
1889		np->rx_ring[i].status = 0;
1890		skb = np->rx_skbuff[i];
1891		if (skb) {
1892			dma_unmap_single(&np->pci_dev->dev,
1893				le32_to_cpu(np->rx_ring[i].frag.addr),
1894				np->rx_buf_sz, DMA_FROM_DEVICE);
1895			dev_kfree_skb(skb);
1896			np->rx_skbuff[i] = NULL;
1897		}
1898		np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
1899	}
1900	for (i = 0; i < TX_RING_SIZE; i++) {
1901		np->tx_ring[i].next_desc = 0;
1902		skb = np->tx_skbuff[i];
1903		if (skb) {
1904			dma_unmap_single(&np->pci_dev->dev,
1905				le32_to_cpu(np->tx_ring[i].frag.addr),
1906				skb->len, DMA_TO_DEVICE);
1907			dev_kfree_skb(skb);
1908			np->tx_skbuff[i] = NULL;
1909		}
1910	}
1911
1912	return 0;
1913}
1914
1915static void sundance_remove1(struct pci_dev *pdev)
1916{
1917	struct net_device *dev = pci_get_drvdata(pdev);
1918
1919	if (dev) {
1920	    struct netdev_private *np = netdev_priv(dev);
1921	    unregister_netdev(dev);
1922	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1923		    np->rx_ring, np->rx_ring_dma);
1924	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1925		    np->tx_ring, np->tx_ring_dma);
1926	    pci_iounmap(pdev, np->base);
1927	    pci_release_regions(pdev);
1928	    free_netdev(dev);
1929	}
1930}
1931
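/* Suspend closes the interface and detaches it from the stack; if
 * Wake-on-LAN was enabled, the receiver is left running so a magic packet
 * or link event can still wake the system.  Resume simply reopens the
 * interface. */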
1932static int __maybe_unused sundance_suspend(struct device *dev_d)
1933{
1934	struct net_device *dev = dev_get_drvdata(dev_d);
1935	struct netdev_private *np = netdev_priv(dev);
1936	void __iomem *ioaddr = np->base;
1937
1938	if (!netif_running(dev))
1939		return 0;
1940
1941	netdev_close(dev);
1942	netif_device_detach(dev);
1943
1944	if (np->wol_enabled) {
1945		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1946		iowrite16(RxEnable, ioaddr + MACCtrl1);
1947	}
1948
1949	device_set_wakeup_enable(dev_d, np->wol_enabled);
1950
1951	return 0;
1952}
1953
1954static int __maybe_unused sundance_resume(struct device *dev_d)
1955{
1956	struct net_device *dev = dev_get_drvdata(dev_d);
1957	int err = 0;
1958
1959	if (!netif_running(dev))
1960		return 0;
1961
1962	err = netdev_open(dev);
1963	if (err) {
1964		printk(KERN_ERR "%s: Can't resume interface!\n",
1965				dev->name);
1966		goto out;
1967	}
1968
1969	netif_device_attach(dev);
1970
1971out:
1972	return err;
1973}
1974
1975static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
1976
1977static struct pci_driver sundance_driver = {
1978	.name		= DRV_NAME,
1979	.id_table	= sundance_pci_tbl,
1980	.probe		= sundance_probe1,
1981	.remove		= sundance_remove1,
1982	.driver.pm	= &sundance_pm_ops,
1983};
1984
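/* module_pci_driver() expands to the module_init()/module_exit() boilerplate
 * that registers and unregisters sundance_driver with the PCI core. */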
1985module_pci_driver(sundance_driver);
v3.5.6
   1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
   2/*
   3	Written 1999-2000 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	The author may be reached as becker@scyld.com, or C/O
  13	Scyld Computing Corporation
  14	410 Severn Ave., Suite 210
  15	Annapolis MD 21403
  16
  17	Support and updates available at
  18	http://www.scyld.com/network/sundance.html
  19	[link no longer provides useful info -jgarzik]
  20	Archives of the mailing list are still available at
  21	http://www.beowulf.org/pipermail/netdrivers/
  22
  23*/
  24
  25#define DRV_NAME	"sundance"
  26#define DRV_VERSION	"1.2"
  27#define DRV_RELDATE	"11-Sep-2006"
  28
  29
  30/* The user-configurable values.
  31   These may be modified when a driver module is loaded.*/
  32static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  34   Typical is a 64 element hash table based on the Ethernet CRC.  */
  35static const int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  38   Setting to > 1518 effectively disables this feature.
  39   This chip can receive into offset buffers, so the Alpha does not
  40   need a copy-align. */
  41static int rx_copybreak;
  42static int flowctrl=1;
  43
  44/* media[] specifies the media type the NIC operates at.
  45		 autosense	Autosensing active media.
  46		 10mbps_hd 	10Mbps half duplex.
  47		 10mbps_fd 	10Mbps full duplex.
  48		 100mbps_hd 	100Mbps half duplex.
  49		 100mbps_fd 	100Mbps full duplex.
  50		 0		Autosensing active media.
  51		 1	 	10Mbps half duplex.
  52		 2	 	10Mbps full duplex.
  53		 3	 	100Mbps half duplex.
  54		 4	 	100Mbps full duplex.
  55*/
  56#define MAX_UNITS 8
  57static char *media[MAX_UNITS];
  58
  59
  60/* Operational parameters that are set at compile time. */
  61
  62/* Keep the ring sizes a power of two for compile efficiency.
  63   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  64   Making the Tx ring too large decreases the effectiveness of channel
  65   bonding and packet priority, and more than 128 requires modifying the
  66   Tx error recovery.
  67   Large receive rings merely waste memory. */
  68#define TX_RING_SIZE	32
  69#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
  70#define RX_RING_SIZE	64
  71#define RX_BUDGET	32
  72#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
  73#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
  74
  75/* Operational parameters that usually are not changed. */
  76/* Time in jiffies before concluding the transmitter is hung. */
  77#define TX_TIMEOUT  (4*HZ)
  78#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
  79
  80/* Include files, designed to support most kernel versions 2.0.0 and later. */
  81#include <linux/module.h>
  82#include <linux/kernel.h>
  83#include <linux/string.h>
  84#include <linux/timer.h>
  85#include <linux/errno.h>
  86#include <linux/ioport.h>
  87#include <linux/interrupt.h>
  88#include <linux/pci.h>
  89#include <linux/netdevice.h>
  90#include <linux/etherdevice.h>
  91#include <linux/skbuff.h>
  92#include <linux/init.h>
  93#include <linux/bitops.h>
  94#include <asm/uaccess.h>
  95#include <asm/processor.h>		/* Processor type for cache alignment. */
  96#include <asm/io.h>
  97#include <linux/delay.h>
  98#include <linux/spinlock.h>
  99#include <linux/dma-mapping.h>
 100#include <linux/crc32.h>
 101#include <linux/ethtool.h>
 102#include <linux/mii.h>
 103
 104/* These identify the driver base version and may not be removed. */
 105static const char version[] __devinitconst =
 106	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
 107	" Written by Donald Becker\n";
 108
 109MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 110MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
 111MODULE_LICENSE("GPL");
 112
 113module_param(debug, int, 0);
 114module_param(rx_copybreak, int, 0);
 115module_param_array(media, charp, NULL, 0);
 116module_param(flowctrl, int, 0);
 117MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
 118MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
 119MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
 120
 121/*
 122				Theory of Operation
 123
 124I. Board Compatibility
 125
 126This driver is designed for the Sundance Technologies "Alta" ST201 chip.
 127
 128II. Board-specific settings
 129
 130III. Driver operation
 131
 132IIIa. Ring buffers
 133
 134This driver uses two statically allocated fixed-size descriptor lists
 135formed into rings by a branch from the final descriptor to the beginning of
 136the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 137Some chips explicitly use only 2^N sized rings, while others use a
 138'next descriptor' pointer that the driver forms into rings.
 139
 140IIIb/c. Transmit/Receive Structure
 141
 142This driver uses a zero-copy receive and transmit scheme.
 143The driver allocates full frame size skbuffs for the Rx ring buffers at
 144open() time and passes the skb->data field to the chip as receive data
 145buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 146a fresh skbuff is allocated and the frame is copied to the new skbuff.
 147When the incoming frame is larger, the skbuff is passed directly up the
 148protocol stack.  Buffers consumed this way are replaced by newly allocated
 149skbuffs in a later phase of receives.
 150
 151The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 152using a full-sized skbuff for small frames vs. the copying costs of larger
 153frames.  New boards are typically used in generously configured machines
 154and the underfilled buffers have negligible impact compared to the benefit of
 155a single allocation size, so the default value of zero results in never
 156copying packets.  When copying is done, the cost is usually mitigated by using
 157a combined copy/checksum routine.  Copying also preloads the cache, which is
 158most useful with small frames.
 159
 160A subtle aspect of the operation is that the IP header at offset 14 in an
 161ethernet frame isn't longword aligned for further processing.
 162Unaligned buffers are permitted by the Sundance hardware, so
 163frames are received into the skbuff at an offset of "+2", 16-byte aligning
 164the IP header.
 165
 166IIId. Synchronization
 167
 168The driver runs as two independent, single-threaded flows of control.  One
 169is the send-packet routine, which enforces single-threaded use by the
 170dev->tbusy flag.  The other thread is the interrupt handler, which is single
 171threaded by the hardware and interrupt handling software.
 172
 173The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 174flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 175queue slot is empty, it clears the tbusy flag when finished otherwise it sets
 176the 'lp->tx_full' flag.
 177
 178The interrupt handler has exclusive control over the Rx ring and records stats
 179from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 180empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
 181clears both the tx_full and tbusy flags.
 182
 183IV. Notes
 184
 185IVb. References
 186
 187The Sundance ST201 datasheet, preliminary version.
 188The Kendin KS8723 datasheet, preliminary version.
 189The ICplus IP100 datasheet, preliminary version.
 190http://www.scyld.com/expert/100mbps.html
 191http://www.scyld.com/expert/NWay.html
 192
 193IVc. Errata
 194
 195*/
 196
 197/* Work-around for Kendin chip bugs. */
 198#ifndef CONFIG_SUNDANCE_MMIO
 199#define USE_IO_OPS 1
 200#endif
 201
 202static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
 203	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
 204	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
 205	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
 206	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
 207	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 208	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 209	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 210	{ }
 211};
 212MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
 213
 214enum {
 215	netdev_io_size = 128
 216};
 217
 218struct pci_id_info {
 219        const char *name;
 220};
 221static const struct pci_id_info pci_id_tbl[] __devinitdata = {
 222	{"D-Link DFE-550TX FAST Ethernet Adapter"},
 223	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
 224	{"D-Link DFE-580TX 4 port Server Adapter"},
 225	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
 226	{"D-Link DL10050-based FAST Ethernet Adapter"},
 227	{"Sundance Technology Alta"},
 228	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 229	{ }	/* terminate list. */
 230};
 231
 232/* This driver was written to use PCI memory space, however x86-oriented
 233   hardware often uses I/O space accesses. */
 234
 235/* Offsets to the device registers.
 236   Unlike software-only systems, device drivers interact with complex hardware.
 237   It's not useful to define symbolic names for every register bit in the
 238   device.  The name can only partially document the semantics and make
 239   the driver longer and more difficult to read.
 240   In general, only the important configuration values or bits changed
 241   multiple times should be defined symbolically.
 242*/
 243enum alta_offsets {
 244	DMACtrl = 0x00,
 245	TxListPtr = 0x04,
 246	TxDMABurstThresh = 0x08,
 247	TxDMAUrgentThresh = 0x09,
 248	TxDMAPollPeriod = 0x0a,
 249	RxDMAStatus = 0x0c,
 250	RxListPtr = 0x10,
 251	DebugCtrl0 = 0x1a,
 252	DebugCtrl1 = 0x1c,
 253	RxDMABurstThresh = 0x14,
 254	RxDMAUrgentThresh = 0x15,
 255	RxDMAPollPeriod = 0x16,
 256	LEDCtrl = 0x1a,
 257	ASICCtrl = 0x30,
 258	EEData = 0x34,
 259	EECtrl = 0x36,
 260	FlashAddr = 0x40,
 261	FlashData = 0x44,
 
 262	TxStatus = 0x46,
 263	TxFrameId = 0x47,
 264	DownCounter = 0x18,
 265	IntrClear = 0x4a,
 266	IntrEnable = 0x4c,
 267	IntrStatus = 0x4e,
 268	MACCtrl0 = 0x50,
 269	MACCtrl1 = 0x52,
 270	StationAddr = 0x54,
 271	MaxFrameSize = 0x5A,
 272	RxMode = 0x5c,
 273	MIICtrl = 0x5e,
 274	MulticastFilter0 = 0x60,
 275	MulticastFilter1 = 0x64,
 276	RxOctetsLow = 0x68,
 277	RxOctetsHigh = 0x6a,
 278	TxOctetsLow = 0x6c,
 279	TxOctetsHigh = 0x6e,
 280	TxFramesOK = 0x70,
 281	RxFramesOK = 0x72,
 282	StatsCarrierError = 0x74,
 283	StatsLateColl = 0x75,
 284	StatsMultiColl = 0x76,
 285	StatsOneColl = 0x77,
 286	StatsTxDefer = 0x78,
 287	RxMissed = 0x79,
 288	StatsTxXSDefer = 0x7a,
 289	StatsTxAbort = 0x7b,
 290	StatsBcastTx = 0x7c,
 291	StatsBcastRx = 0x7d,
 292	StatsMcastTx = 0x7e,
 293	StatsMcastRx = 0x7f,
 294	/* Aliased and bogus values! */
 295	RxStatus = 0x0c,
 296};
 297
 298#define ASIC_HI_WORD(x)	((x) + 2)
 299
 300enum ASICCtrl_HiWord_bit {
 301	GlobalReset = 0x0001,
 302	RxReset = 0x0002,
 303	TxReset = 0x0004,
 304	DMAReset = 0x0008,
 305	FIFOReset = 0x0010,
 306	NetworkReset = 0x0020,
 307	HostReset = 0x0040,
 308	ResetBusy = 0x0400,
 309};
 310
 311/* Bits in the interrupt status/mask registers. */
 312enum intr_status_bits {
 313	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
 314	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
 315	IntrDrvRqst=0x0040,
 316	StatsMax=0x0080, LinkChange=0x0100,
 317	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
 318};
 319
 320/* Bits in the RxMode register. */
 321enum rx_mode_bits {
 322	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
 323	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
 324};
 325/* Bits in MACCtrl. */
 326enum mac_ctrl0_bits {
 327	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
 328	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
 329};
 330enum mac_ctrl1_bits {
 331	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
 332	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
 333	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
 334};
 335
 
 
 
 
 
 
 
 
 336/* The Rx and Tx buffer descriptors. */
 337/* Note that using only 32 bit fields simplifies conversion to big-endian
 338   architectures. */
 339struct netdev_desc {
 340	__le32 next_desc;
 341	__le32 status;
 342	struct desc_frag { __le32 addr, length; } frag[1];
 343};
 344
 345/* Bits in netdev_desc.status */
 346enum desc_status_bits {
 347	DescOwn=0x8000,
 348	DescEndPacket=0x4000,
 349	DescEndRing=0x2000,
 350	LastFrag=0x80000000,
 351	DescIntrOnTx=0x8000,
 352	DescIntrOnDMADone=0x80000000,
 353	DisableAlign = 0x00000001,
 354};
 355
 356#define PRIV_ALIGN	15 	/* Required alignment mask */
 357/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
 358   within the structure. */
 359#define MII_CNT		4
 360struct netdev_private {
 361	/* Descriptor rings first for alignment. */
 362	struct netdev_desc *rx_ring;
 363	struct netdev_desc *tx_ring;
 364	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 365	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 366        dma_addr_t tx_ring_dma;
 367        dma_addr_t rx_ring_dma;
 368	struct timer_list timer;		/* Media monitoring timer. */
 
 369	/* ethtool extra stats */
 370	struct {
 371		u64 tx_multiple_collisions;
 372		u64 tx_single_collisions;
 373		u64 tx_late_collisions;
 374		u64 tx_deferred;
 375		u64 tx_deferred_excessive;
 376		u64 tx_aborted;
 377		u64 tx_bcasts;
 378		u64 rx_bcasts;
 379		u64 tx_mcasts;
 380		u64 rx_mcasts;
 381	} xstats;
 382	/* Frequently used values: keep some adjacent for cache effect. */
 383	spinlock_t lock;
 384	int msg_enable;
 385	int chip_id;
 386	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 387	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
 388	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
 389	unsigned int cur_tx, dirty_tx;
 390	/* These values are keep track of the transceiver/media in use. */
 391	unsigned int flowctrl:1;
 392	unsigned int default_port:4;		/* Last dev->if_port value. */
 393	unsigned int an_enable:1;
 394	unsigned int speed;
 
 395	struct tasklet_struct rx_tasklet;
 396	struct tasklet_struct tx_tasklet;
 397	int budget;
 398	int cur_task;
 399	/* Multicast and receive mode. */
 400	spinlock_t mcastlock;			/* SMP lock multicast updates. */
 401	u16 mcast_filter[4];
 402	/* MII transceiver section. */
 403	struct mii_if_info mii_if;
 404	int mii_preamble_required;
 405	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
 406	struct pci_dev *pci_dev;
 407	void __iomem *base;
 408	spinlock_t statlock;
 409};
 410
 411/* The station address location in the EEPROM. */
 412#define EEPROM_SA_OFFSET	0x10
 413#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
 414			IntrDrvRqst | IntrTxDone | StatsMax | \
 415			LinkChange)
 416
 417static int  change_mtu(struct net_device *dev, int new_mtu);
 418static int  eeprom_read(void __iomem *ioaddr, int location);
 419static int  mdio_read(struct net_device *dev, int phy_id, int location);
 420static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 421static int  mdio_wait_link(struct net_device *dev, int wait);
 422static int  netdev_open(struct net_device *dev);
 423static void check_duplex(struct net_device *dev);
 424static void netdev_timer(unsigned long data);
 425static void tx_timeout(struct net_device *dev);
 426static void init_ring(struct net_device *dev);
 427static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 428static int reset_tx (struct net_device *dev);
 429static irqreturn_t intr_handler(int irq, void *dev_instance);
 430static void rx_poll(unsigned long data);
 431static void tx_poll(unsigned long data);
 432static void refill_rx (struct net_device *dev);
 433static void netdev_error(struct net_device *dev, int intr_status);
 434static void netdev_error(struct net_device *dev, int intr_status);
 435static void set_rx_mode(struct net_device *dev);
 436static int __set_mac_addr(struct net_device *dev);
 437static int sundance_set_mac_addr(struct net_device *dev, void *data);
 438static struct net_device_stats *get_stats(struct net_device *dev);
 439static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 440static int  netdev_close(struct net_device *dev);
 441static const struct ethtool_ops ethtool_ops;
 442
 443static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
 444{
 445	struct netdev_private *np = netdev_priv(dev);
 446	void __iomem *ioaddr = np->base + ASICCtrl;
 447	int countdown;
 448
 449	/* ST201 documentation states ASICCtrl is a 32bit register */
 450	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
 451	/* ST201 documentation states reset can take up to 1 ms */
 452	countdown = 10 + 1;
 453	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
 454		if (--countdown == 0) {
 455			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
 456			break;
 457		}
 458		udelay(100);
 459	}
 460}
 461
 
 
 
 
 
 
 
 
 
 
 
 462static const struct net_device_ops netdev_ops = {
 463	.ndo_open		= netdev_open,
 464	.ndo_stop		= netdev_close,
 465	.ndo_start_xmit		= start_tx,
 466	.ndo_get_stats 		= get_stats,
 467	.ndo_set_rx_mode	= set_rx_mode,
 468	.ndo_do_ioctl 		= netdev_ioctl,
 469	.ndo_tx_timeout		= tx_timeout,
 470	.ndo_change_mtu		= change_mtu,
 471	.ndo_set_mac_address 	= sundance_set_mac_addr,
 472	.ndo_validate_addr	= eth_validate_addr,
 
 
 
 473};
 474
 475static int __devinit sundance_probe1 (struct pci_dev *pdev,
 476				      const struct pci_device_id *ent)
 477{
 478	struct net_device *dev;
 479	struct netdev_private *np;
 480	static int card_idx;
 481	int chip_idx = ent->driver_data;
 482	int irq;
 483	int i;
 484	void __iomem *ioaddr;
 485	u16 mii_ctl;
 486	void *ring_space;
 487	dma_addr_t ring_dma;
 488#ifdef USE_IO_OPS
 489	int bar = 0;
 490#else
 491	int bar = 1;
 492#endif
 493	int phy, phy_end, phy_idx = 0;
 494
 495/* when built into the kernel, we only print version if device is found */
 496#ifndef MODULE
 497	static int printed_version;
 498	if (!printed_version++)
 499		printk(version);
 500#endif
 501
 502	if (pci_enable_device(pdev))
 503		return -EIO;
 504	pci_set_master(pdev);
 505
 506	irq = pdev->irq;
 507
 508	dev = alloc_etherdev(sizeof(*np));
 509	if (!dev)
 510		return -ENOMEM;
 511	SET_NETDEV_DEV(dev, &pdev->dev);
 512
 513	if (pci_request_regions(pdev, DRV_NAME))
 514		goto err_out_netdev;
 515
 516	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
 517	if (!ioaddr)
 518		goto err_out_res;
 519
 520	for (i = 0; i < 3; i++)
 521		((__le16 *)dev->dev_addr)[i] =
 522			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 523	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 524
 525	np = netdev_priv(dev);
 
 526	np->base = ioaddr;
 527	np->pci_dev = pdev;
 528	np->chip_id = chip_idx;
 529	np->msg_enable = (1 << debug) - 1;
 530	spin_lock_init(&np->lock);
 531	spin_lock_init(&np->statlock);
 532	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
 533	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
 534
 535	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
 536			&ring_dma, GFP_KERNEL);
 537	if (!ring_space)
 538		goto err_out_cleardev;
 539	np->tx_ring = (struct netdev_desc *)ring_space;
 540	np->tx_ring_dma = ring_dma;
 541
 542	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
 543			&ring_dma, GFP_KERNEL);
 544	if (!ring_space)
 545		goto err_out_unmap_tx;
 546	np->rx_ring = (struct netdev_desc *)ring_space;
 547	np->rx_ring_dma = ring_dma;
 548
 549	np->mii_if.dev = dev;
 550	np->mii_if.mdio_read = mdio_read;
 551	np->mii_if.mdio_write = mdio_write;
 552	np->mii_if.phy_id_mask = 0x1f;
 553	np->mii_if.reg_num_mask = 0x1f;
 554
 555	/* The chip-specific entries in the device structure. */
 556	dev->netdev_ops = &netdev_ops;
 557	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 558	dev->watchdog_timeo = TX_TIMEOUT;
 559
 
 
 
 
 560	pci_set_drvdata(pdev, dev);
 561
 562	i = register_netdev(dev);
 563	if (i)
 564		goto err_out_unmap_rx;
 565
 566	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 567	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 568	       dev->dev_addr, irq);
 569
 570	np->phys[0] = 1;		/* Default setting */
 571	np->mii_preamble_required++;
 572
 573	/*
 574	 * It seems some phys doesn't deal well with address 0 being accessed
 575	 * first
 576	 */
 577	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
 578		phy = 0;
 579		phy_end = 31;
 580	} else {
 581		phy = 1;
 582		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
 583	}
 584	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 585		int phyx = phy & 0x1f;
 586		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 587		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 588			np->phys[phy_idx++] = phyx;
 589			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
 590			if ((mii_status & 0x0040) == 0)
 591				np->mii_preamble_required++;
 592			printk(KERN_INFO "%s: MII PHY found at address %d, status "
 593				   "0x%4.4x advertising %4.4x.\n",
 594				   dev->name, phyx, mii_status, np->mii_if.advertising);
 595		}
 596	}
 597	np->mii_preamble_required--;
 598
 599	if (phy_idx == 0) {
 600		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
 601			   dev->name, ioread32(ioaddr + ASICCtrl));
 602		goto err_out_unregister;
 603	}
 604
 605	np->mii_if.phy_id = np->phys[0];
 606
 607	/* Parse override configuration */
 608	np->an_enable = 1;
 609	if (card_idx < MAX_UNITS) {
 610		if (media[card_idx] != NULL) {
 611			np->an_enable = 0;
 612			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
 613			    strcmp (media[card_idx], "4") == 0) {
 614				np->speed = 100;
 615				np->mii_if.full_duplex = 1;
 616			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
 617				   strcmp (media[card_idx], "3") == 0) {
 618				np->speed = 100;
 619				np->mii_if.full_duplex = 0;
 620			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
 621				   strcmp (media[card_idx], "2") == 0) {
 622				np->speed = 10;
 623				np->mii_if.full_duplex = 1;
 624			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
 625				   strcmp (media[card_idx], "1") == 0) {
 626				np->speed = 10;
 627				np->mii_if.full_duplex = 0;
 628			} else {
 629				np->an_enable = 1;
 630			}
 631		}
 632		if (flowctrl == 1)
 633			np->flowctrl = 1;
 634	}
 635
 636	/* Fibre PHY? */
 637	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
 638		/* Default 100Mbps Full */
 639		if (np->an_enable) {
 640			np->speed = 100;
 641			np->mii_if.full_duplex = 1;
 642			np->an_enable = 0;
 643		}
 644	}
 645	/* Reset PHY */
 646	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
 647	mdelay (300);
 648	/* If flow control enabled, we need to advertise it.*/
 649	if (np->flowctrl)
 650		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
 651	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 652	/* Force media type */
 653	if (!np->an_enable) {
 654		mii_ctl = 0;
 655		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
 656		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
 657		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
 658		printk (KERN_INFO "Override speed=%d, %s duplex\n",
 659			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
 660
 661	}
 662
 663	/* Perhaps move the reset here? */
 664	/* Reset the chip to erase previous misconfiguration. */
 665	if (netif_msg_hw(np))
 666		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 667	sundance_reset(dev, 0x00ff << 16);
 668	if (netif_msg_hw(np))
 669		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
 670
 671	card_idx++;
 672	return 0;
 673
 674err_out_unregister:
 675	unregister_netdev(dev);
 676err_out_unmap_rx:
 677	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
 678		np->rx_ring, np->rx_ring_dma);
 679err_out_unmap_tx:
 680	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
 681		np->tx_ring, np->tx_ring_dma);
 682err_out_cleardev:
 683	pci_set_drvdata(pdev, NULL);
 684	pci_iounmap(pdev, ioaddr);
 685err_out_res:
 686	pci_release_regions(pdev);
 687err_out_netdev:
 688	free_netdev (dev);
 689	return -ENODEV;
 690}
 691
 692static int change_mtu(struct net_device *dev, int new_mtu)
 693{
 694	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
 695		return -EINVAL;
 696	if (netif_running(dev))
 697		return -EBUSY;
 698	dev->mtu = new_mtu;
 699	return 0;
 700}
 701
 702#define eeprom_delay(ee_addr)	ioread32(ee_addr)
 703/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
 704static int __devinit eeprom_read(void __iomem *ioaddr, int location)
 705{
 706	int boguscnt = 10000;		/* Typical 1900 ticks. */
 707	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
 708	do {
 709		eeprom_delay(ioaddr + EECtrl);
 710		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
 711			return ioread16(ioaddr + EEData);
 712		}
 713	} while (--boguscnt > 0);
 714	return 0;
 715}
 716
 717/*  MII transceiver control section.
 718	Read and write the MII registers using software-generated serial
 719	MDIO protocol.  See the MII specifications or DP83840A data sheet
 720	for details.
 721
 722	The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
 723	met by back-to-back 33Mhz PCI cycles. */
 724#define mdio_delay() ioread8(mdio_addr)
 725
 726enum mii_reg_bits {
 727	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
 728};
 729#define MDIO_EnbIn  (0)
 730#define MDIO_WRITE0 (MDIO_EnbOutput)
 731#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
 732
 733/* Generate the preamble required for initial synchronization and
 734   a few older transceivers. */
 735static void mdio_sync(void __iomem *mdio_addr)
 736{
 737	int bits = 32;
 738
 739	/* Establish sync by sending at least 32 logic ones. */
 740	while (--bits >= 0) {
 741		iowrite8(MDIO_WRITE1, mdio_addr);
 742		mdio_delay();
 743		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
 744		mdio_delay();
 745	}
 746}
 747
 748static int mdio_read(struct net_device *dev, int phy_id, int location)
 749{
 750	struct netdev_private *np = netdev_priv(dev);
 751	void __iomem *mdio_addr = np->base + MIICtrl;
 752	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
 753	int i, retval = 0;
 754
 755	if (np->mii_preamble_required)
 756		mdio_sync(mdio_addr);
 757
 758	/* Shift the read command bits out. */
 759	for (i = 15; i >= 0; i--) {
 760		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 761
 762		iowrite8(dataval, mdio_addr);
 763		mdio_delay();
 764		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 765		mdio_delay();
 766	}
 767	/* Read the two transition, 16 data, and wire-idle bits. */
 768	for (i = 19; i > 0; i--) {
 769		iowrite8(MDIO_EnbIn, mdio_addr);
 770		mdio_delay();
 771		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
 772		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 773		mdio_delay();
 774	}
 775	return (retval>>1) & 0xffff;
 776}
 777
 778static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
 779{
 780	struct netdev_private *np = netdev_priv(dev);
 781	void __iomem *mdio_addr = np->base + MIICtrl;
 782	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
 783	int i;
 784
 785	if (np->mii_preamble_required)
 786		mdio_sync(mdio_addr);
 787
 788	/* Shift the command bits out. */
 789	for (i = 31; i >= 0; i--) {
 790		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
 791
 792		iowrite8(dataval, mdio_addr);
 793		mdio_delay();
 794		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
 795		mdio_delay();
 796	}
 797	/* Clear out extra bits. */
 798	for (i = 2; i > 0; i--) {
 799		iowrite8(MDIO_EnbIn, mdio_addr);
 800		mdio_delay();
 801		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
 802		mdio_delay();
 803	}
 804}
 805
 806static int mdio_wait_link(struct net_device *dev, int wait)
 807{
 808	int bmsr;
 809	int phy_id;
 810	struct netdev_private *np;
 811
 812	np = netdev_priv(dev);
 813	phy_id = np->phys[0];
 814
 815	do {
 816		bmsr = mdio_read(dev, phy_id, MII_BMSR);
 817		if (bmsr & 0x0004)
 818			return 0;
 819		mdelay(1);
 820	} while (--wait > 0);
 821	return -1;
 822}
 823
 824static int netdev_open(struct net_device *dev)
 825{
 826	struct netdev_private *np = netdev_priv(dev);
 827	void __iomem *ioaddr = np->base;
 828	const int irq = np->pci_dev->irq;
 829	unsigned long flags;
 830	int i;
 831
 832	/* Do we need to reset the chip??? */
 833
 834	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 835	if (i)
 836		return i;
 837
 838	if (netif_msg_ifup(np))
 839		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
 840
 841	init_ring(dev);
 842
 843	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
 844	/* The Tx list pointer is written as packets are queued. */
 845
 846	/* Initialize other registers. */
 847	__set_mac_addr(dev);
 848#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 849	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
 850#else
 851	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
 852#endif
 853	if (dev->mtu > 2047)
 854		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
 855
 856	/* Configure the PCI bus bursts and FIFO thresholds. */
 857
 858	if (dev->if_port == 0)
 859		dev->if_port = np->default_port;
 860
 861	spin_lock_init(&np->mcastlock);
 862
 863	set_rx_mode(dev);
 864	iowrite16(0, ioaddr + IntrEnable);
 865	iowrite16(0, ioaddr + DownCounter);
 866	/* Set the chip to poll every N*320nsec. */
 867	iowrite8(100, ioaddr + RxDMAPollPeriod);
 868	iowrite8(127, ioaddr + TxDMAPollPeriod);
 869	/* Fix DFE-580TX packet drop issue */
 870	if (np->pci_dev->revision >= 0x14)
 871		iowrite8(0x01, ioaddr + DebugCtrl1);
 872	netif_start_queue(dev);
 873
 874	spin_lock_irqsave(&np->lock, flags);
 875	reset_tx(dev);
 876	spin_unlock_irqrestore(&np->lock, flags);
 877
 878	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
 879
 
 
 
 
 880	if (netif_msg_ifup(np))
 881		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
 882			   "MAC Control %x, %4.4x %4.4x.\n",
 883			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
 884			   ioread32(ioaddr + MACCtrl0),
 885			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
 886
 887	/* Set the timer to check for link beat. */
 888	init_timer(&np->timer);
 889	np->timer.expires = jiffies + 3*HZ;
 890	np->timer.data = (unsigned long)dev;
 891	np->timer.function = netdev_timer;				/* timer handler */
 892	add_timer(&np->timer);
 893
 894	/* Enable interrupts by setting the interrupt mask. */
 895	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 896
 897	return 0;
 898}
 899
 900static void check_duplex(struct net_device *dev)
 901{
 902	struct netdev_private *np = netdev_priv(dev);
 903	void __iomem *ioaddr = np->base;
 904	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
 905	int negotiated = mii_lpa & np->mii_if.advertising;
 906	int duplex;
 907
 908	/* Force media */
 909	if (!np->an_enable || mii_lpa == 0xffff) {
 910		if (np->mii_if.full_duplex)
 911			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
 912				ioaddr + MACCtrl0);
 913		return;
 914	}
 915
 916	/* Autonegotiation */
 917	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 918	if (np->mii_if.full_duplex != duplex) {
 919		np->mii_if.full_duplex = duplex;
 920		if (netif_msg_link(np))
 921			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
 922				   "negotiated capability %4.4x.\n", dev->name,
 923				   duplex ? "full" : "half", np->phys[0], negotiated);
 924		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
 925	}
 926}
 927
 928static void netdev_timer(unsigned long data)
 929{
 930	struct net_device *dev = (struct net_device *)data;
 931	struct netdev_private *np = netdev_priv(dev);
 932	void __iomem *ioaddr = np->base;
 933	int next_tick = 10*HZ;
 934
 935	if (netif_msg_timer(np)) {
 936		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
 937			   "Tx %x Rx %x.\n",
 938			   dev->name, ioread16(ioaddr + IntrEnable),
 939			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
 940	}
 941	check_duplex(dev);
 942	np->timer.expires = jiffies + next_tick;
 943	add_timer(&np->timer);
 944}
 945
 946static void tx_timeout(struct net_device *dev)
 947{
 948	struct netdev_private *np = netdev_priv(dev);
 949	void __iomem *ioaddr = np->base;
 950	unsigned long flag;
 951
 952	netif_stop_queue(dev);
 953	tasklet_disable(&np->tx_tasklet);
 954	iowrite16(0, ioaddr + IntrEnable);
 955	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
 956		   "TxFrameId %2.2x,"
 957		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
 958		   ioread8(ioaddr + TxFrameId));
 959
 960	{
 961		int i;
 962		for (i=0; i<TX_RING_SIZE; i++) {
 963			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
 964				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
 965				le32_to_cpu(np->tx_ring[i].next_desc),
 966				le32_to_cpu(np->tx_ring[i].status),
 967				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 968				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 969				le32_to_cpu(np->tx_ring[i].frag[0].length));
 970		}
 971		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
 972			ioread32(np->base + TxListPtr),
 973			netif_queue_stopped(dev));
 974		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
 975			np->cur_tx, np->cur_tx % TX_RING_SIZE,
 976			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
 977		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
 978		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
 979	}
 980	spin_lock_irqsave(&np->lock, flag);
 981
 982	/* Stop and restart the chip's Tx processes . */
 983	reset_tx(dev);
 984	spin_unlock_irqrestore(&np->lock, flag);
 985
 986	dev->if_port = 0;
 987
 988	dev->trans_start = jiffies; /* prevent tx timeout */
 989	dev->stats.tx_errors++;
 990	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
 991		netif_wake_queue(dev);
 992	}
 993	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
 994	tasklet_enable(&np->tx_tasklet);
 995}
 996
 997
 998/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 999static void init_ring(struct net_device *dev)
1000{
1001	struct netdev_private *np = netdev_priv(dev);
1002	int i;
1003
1004	np->cur_rx = np->cur_tx = 0;
1005	np->dirty_rx = np->dirty_tx = 0;
1006	np->cur_task = 0;
1007
1008	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1009
1010	/* Initialize all Rx descriptors. */
1011	for (i = 0; i < RX_RING_SIZE; i++) {
1012		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1013			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1014		np->rx_ring[i].status = 0;
1015		np->rx_ring[i].frag[0].length = 0;
1016		np->rx_skbuff[i] = NULL;
1017	}
1018
1019	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1020	for (i = 0; i < RX_RING_SIZE; i++) {
1021		struct sk_buff *skb =
1022			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1023		np->rx_skbuff[i] = skb;
1024		if (skb == NULL)
1025			break;
1026		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1027		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1028			dma_map_single(&np->pci_dev->dev, skb->data,
1029				np->rx_buf_sz, DMA_FROM_DEVICE));
1030		if (dma_mapping_error(&np->pci_dev->dev,
1031					np->rx_ring[i].frag[0].addr)) {
1032			dev_kfree_skb(skb);
1033			np->rx_skbuff[i] = NULL;
1034			break;
1035		}
1036		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1037	}
1038	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1039
1040	for (i = 0; i < TX_RING_SIZE; i++) {
1041		np->tx_skbuff[i] = NULL;
1042		np->tx_ring[i].status = 0;
1043	}
1044}
1045
1046static void tx_poll (unsigned long data)
1047{
1048	struct net_device *dev = (struct net_device *)data;
1049	struct netdev_private *np = netdev_priv(dev);
1050	unsigned head = np->cur_task % TX_RING_SIZE;
1051	struct netdev_desc *txdesc =
1052		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1053
1054	/* Chain the next pointer */
1055	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1056		int entry = np->cur_task % TX_RING_SIZE;
1057		txdesc = &np->tx_ring[entry];
1058		if (np->last_tx) {
1059			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1060				entry*sizeof(struct netdev_desc));
1061		}
1062		np->last_tx = txdesc;
1063	}
1064	/* Indicate the latest descriptor of tx ring */
1065	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1066
1067	if (ioread32 (np->base + TxListPtr) == 0)
1068		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1069			np->base + TxListPtr);
1070}
1071
1072static netdev_tx_t
1073start_tx (struct sk_buff *skb, struct net_device *dev)
1074{
1075	struct netdev_private *np = netdev_priv(dev);
1076	struct netdev_desc *txdesc;
1077	unsigned entry;
1078
1079	/* Calculate the next Tx descriptor entry. */
1080	entry = np->cur_tx % TX_RING_SIZE;
1081	np->tx_skbuff[entry] = skb;
1082	txdesc = &np->tx_ring[entry];
1083
1084	txdesc->next_desc = 0;
1085	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1086	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1087				skb->data, skb->len, DMA_TO_DEVICE));
1088	if (dma_mapping_error(&np->pci_dev->dev,
1089				txdesc->frag[0].addr))
1090			goto drop_frame;
1091	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1092
1093	/* Increment cur_tx before tasklet_schedule() */
1094	np->cur_tx++;
1095	mb();
1096	/* Schedule a tx_poll() task */
1097	tasklet_schedule(&np->tx_tasklet);
1098
1099	/* On some architectures: explicitly flush cache lines here. */
1100	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1101	    !netif_queue_stopped(dev)) {
1102		/* do nothing */
1103	} else {
1104		netif_stop_queue (dev);
1105	}
1106	if (netif_msg_tx_queued(np)) {
1107		printk (KERN_DEBUG
1108			"%s: Transmit frame #%d queued in slot %d.\n",
1109			dev->name, np->cur_tx, entry);
1110	}
1111	return NETDEV_TX_OK;
1112
1113drop_frame:
1114	dev_kfree_skb(skb);
1115	np->tx_skbuff[entry] = NULL;
1116	dev->stats.tx_dropped++;
1117	return NETDEV_TX_OK;
1118}
1119
1120/* Reset hardware tx and free all of tx buffers */
1121static int
1122reset_tx (struct net_device *dev)
1123{
1124	struct netdev_private *np = netdev_priv(dev);
1125	void __iomem *ioaddr = np->base;
1126	struct sk_buff *skb;
1127	int i;
1128
1129	/* Reset tx logic, TxListPtr will be cleaned */
1130	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1131	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1132
1133	/* free all tx skbuff */
1134	for (i = 0; i < TX_RING_SIZE; i++) {
1135		np->tx_ring[i].next_desc = 0;
1136
1137		skb = np->tx_skbuff[i];
1138		if (skb) {
1139			dma_unmap_single(&np->pci_dev->dev,
1140				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1141				skb->len, DMA_TO_DEVICE);
1142			dev_kfree_skb_any(skb);
1143			np->tx_skbuff[i] = NULL;
1144			dev->stats.tx_dropped++;
1145		}
1146	}
1147	np->cur_tx = np->dirty_tx = 0;
1148	np->cur_task = 0;
1149
1150	np->last_tx = NULL;
1151	iowrite8(127, ioaddr + TxDMAPollPeriod);
1152
1153	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1154	return 0;
1155}
1156
1157/* The interrupt handler cleans up after the Tx thread,
1158   and schedule a Rx thread work */
1159static irqreturn_t intr_handler(int irq, void *dev_instance)
1160{
1161	struct net_device *dev = (struct net_device *)dev_instance;
1162	struct netdev_private *np = netdev_priv(dev);
1163	void __iomem *ioaddr = np->base;
1164	int hw_frame_id;
1165	int tx_cnt;
1166	int tx_status;
1167	int handled = 0;
1168	int i;
1169
1170
1171	do {
1172		int intr_status = ioread16(ioaddr + IntrStatus);
1173		iowrite16(intr_status, ioaddr + IntrStatus);
1174
1175		if (netif_msg_intr(np))
1176			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1177				   dev->name, intr_status);
1178
1179		if (!(intr_status & DEFAULT_INTR))
1180			break;
1181
1182		handled = 1;
1183
1184		if (intr_status & (IntrRxDMADone)) {
1185			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1186					ioaddr + IntrEnable);
1187			if (np->budget < 0)
1188				np->budget = RX_BUDGET;
1189			tasklet_schedule(&np->rx_tasklet);
1190		}
1191		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1192			tx_status = ioread16 (ioaddr + TxStatus);
1193			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1194				if (netif_msg_tx_done(np))
1195					printk
1196					    ("%s: Transmit status is %2.2x.\n",
1197				     	dev->name, tx_status);
1198				if (tx_status & 0x1e) {
1199					if (netif_msg_tx_err(np))
1200						printk("%s: Transmit error status %4.4x.\n",
1201							   dev->name, tx_status);
1202					dev->stats.tx_errors++;
1203					if (tx_status & 0x10)
1204						dev->stats.tx_fifo_errors++;
1205					if (tx_status & 0x08)
1206						dev->stats.collisions++;
1207					if (tx_status & 0x04)
1208						dev->stats.tx_fifo_errors++;
1209					if (tx_status & 0x02)
1210						dev->stats.tx_window_errors++;
1211
1212					/*
1213					** This reset has been verified on
1214					** DFE-580TX boards ! phdm@macqel.be.
1215					*/
1216					if (tx_status & 0x10) {	/* TxUnderrun */
1217						/* Restart Tx FIFO and transmitter */
1218						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1219						/* No need to reset the Tx pointer here */
1220					}
1221					/* Restart the Tx. Need to make sure tx enabled */
1222					i = 10;
1223					do {
1224						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1225						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1226							break;
1227						mdelay(1);
1228					} while (--i);
1229				}
1230				/* Yup, this is a documentation bug.  It cost me *hours*. */
1231				iowrite16 (0, ioaddr + TxStatus);
1232				if (tx_cnt < 0) {
1233					iowrite32(5000, ioaddr + DownCounter);
1234					break;
1235				}
1236				tx_status = ioread16 (ioaddr + TxStatus);
1237			}
1238			hw_frame_id = (tx_status >> 8) & 0xff;
1239		} else 	{
1240			hw_frame_id = ioread8(ioaddr + TxFrameId);
1241		}
1242
1243		if (np->pci_dev->revision >= 0x14) {
1244			spin_lock(&np->lock);
1245			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1246				int entry = np->dirty_tx % TX_RING_SIZE;
1247				struct sk_buff *skb;
1248				int sw_frame_id;
1249				sw_frame_id = (le32_to_cpu(
1250					np->tx_ring[entry].status) >> 2) & 0xff;
1251				if (sw_frame_id == hw_frame_id &&
1252					!(le32_to_cpu(np->tx_ring[entry].status)
1253					& 0x00010000))
1254						break;
1255				if (sw_frame_id == (hw_frame_id + 1) %
1256					TX_RING_SIZE)
1257						break;
1258				skb = np->tx_skbuff[entry];
1259				/* Free the original skb. */
1260				dma_unmap_single(&np->pci_dev->dev,
1261					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1262					skb->len, DMA_TO_DEVICE);
1263				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1264				np->tx_skbuff[entry] = NULL;
1265				np->tx_ring[entry].frag[0].addr = 0;
1266				np->tx_ring[entry].frag[0].length = 0;
1267			}
1268			spin_unlock(&np->lock);
1269		} else {
1270			spin_lock(&np->lock);
1271			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1272				int entry = np->dirty_tx % TX_RING_SIZE;
1273				struct sk_buff *skb;
1274				if (!(le32_to_cpu(np->tx_ring[entry].status)
1275							& 0x00010000))
1276					break;
1277				skb = np->tx_skbuff[entry];
1278				/* Free the original skb. */
1279				dma_unmap_single(&np->pci_dev->dev,
1280					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1281					skb->len, DMA_TO_DEVICE);
1282				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1283				np->tx_skbuff[entry] = NULL;
1284				np->tx_ring[entry].frag[0].addr = 0;
1285				np->tx_ring[entry].frag[0].length = 0;
1286			}
1287			spin_unlock(&np->lock);
1288		}
1289
1290		if (netif_queue_stopped(dev) &&
1291			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1292			/* The ring is no longer full, clear busy flag. */
1293			netif_wake_queue (dev);
1294		}
1295		/* Abnormal error summary/uncommon events handlers. */
1296		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1297			netdev_error(dev, intr_status);
1298	} while (0);
1299	if (netif_msg_intr(np))
1300		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1301			   dev->name, ioread16(ioaddr + IntrStatus));
1302	return IRQ_RETVAL(handled);
1303}
1304
1305static void rx_poll(unsigned long data)
1306{
1307	struct net_device *dev = (struct net_device *)data;
1308	struct netdev_private *np = netdev_priv(dev);
1309	int entry = np->cur_rx % RX_RING_SIZE;
1310	int boguscnt = np->budget;
1311	void __iomem *ioaddr = np->base;
1312	int received = 0;
1313
1314	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1315	while (1) {
1316		struct netdev_desc *desc = &(np->rx_ring[entry]);
1317		u32 frame_status = le32_to_cpu(desc->status);
1318		int pkt_len;
1319
1320		if (--boguscnt < 0) {
1321			goto not_done;
1322		}
1323		if (!(frame_status & DescOwn))
1324			break;
1325		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1326		if (netif_msg_rx_status(np))
1327			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1328				   frame_status);
1329		if (frame_status & 0x001f4000) {
1330			/* There was a error. */
1331			if (netif_msg_rx_err(np))
1332				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1333					   frame_status);
1334			dev->stats.rx_errors++;
1335			if (frame_status & 0x00100000)
1336				dev->stats.rx_length_errors++;
1337			if (frame_status & 0x00010000)
1338				dev->stats.rx_fifo_errors++;
1339			if (frame_status & 0x00060000)
1340				dev->stats.rx_frame_errors++;
1341			if (frame_status & 0x00080000)
1342				dev->stats.rx_crc_errors++;
1343			if (frame_status & 0x00100000) {
1344				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1345					   " status %8.8x.\n",
1346					   dev->name, frame_status);
1347			}
1348		} else {
1349			struct sk_buff *skb;
1350#ifndef final_version
1351			if (netif_msg_rx_status(np))
1352				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1353					   ", bogus_cnt %d.\n",
1354					   pkt_len, boguscnt);
1355#endif
1356			/* Check if the packet is long enough to accept without copying
1357			   to a minimally-sized skbuff. */
1358			if (pkt_len < rx_copybreak &&
1359			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1360				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1361				dma_sync_single_for_cpu(&np->pci_dev->dev,
1362						le32_to_cpu(desc->frag[0].addr),
1363						np->rx_buf_sz, DMA_FROM_DEVICE);
1364				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1365				dma_sync_single_for_device(&np->pci_dev->dev,
1366						le32_to_cpu(desc->frag[0].addr),
1367						np->rx_buf_sz, DMA_FROM_DEVICE);
1368				skb_put(skb, pkt_len);
1369			} else {
1370				dma_unmap_single(&np->pci_dev->dev,
1371					le32_to_cpu(desc->frag[0].addr),
1372					np->rx_buf_sz, DMA_FROM_DEVICE);
1373				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1374				np->rx_skbuff[entry] = NULL;
1375			}
1376			skb->protocol = eth_type_trans(skb, dev);
1377			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1378			netif_rx(skb);
1379		}
1380		entry = (entry + 1) % RX_RING_SIZE;
1381		received++;
1382	}
1383	np->cur_rx = entry;
1384	refill_rx (dev);
1385	np->budget -= received;
1386	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1387	return;
1388
1389not_done:
1390	np->cur_rx = entry;
1391	refill_rx (dev);
1392	if (!received)
1393		received = 1;
1394	np->budget -= received;
1395	if (np->budget <= 0)
1396		np->budget = RX_BUDGET;
1397	tasklet_schedule(&np->rx_tasklet);
1398}
1399
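/*
 * Replenish the Rx ring between dirty_rx and cur_rx: allocate a fresh skb
 * for every empty slot, map it for DMA and hand the descriptor back to the
 * chip by clearing its status word.  An allocation or mapping failure just
 * stops the loop; the slot is retried on the next call.
 */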
1400static void refill_rx (struct net_device *dev)
1401{
1402	struct netdev_private *np = netdev_priv(dev);
1403	int entry;
1404	int cnt = 0;
1405
1406	/* Refill the Rx ring buffers. */
1407	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1408		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1409		struct sk_buff *skb;
1410		entry = np->dirty_rx % RX_RING_SIZE;
1411		if (np->rx_skbuff[entry] == NULL) {
1412			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1413			np->rx_skbuff[entry] = skb;
1414			if (skb == NULL)
1415				break;		/* Better luck next round. */
1416			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1417			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1418				dma_map_single(&np->pci_dev->dev, skb->data,
1419					np->rx_buf_sz, DMA_FROM_DEVICE));
1420			if (dma_mapping_error(&np->pci_dev->dev,
1421					np->rx_ring[entry].frag[0].addr)) {
1422				dev_kfree_skb_irq(skb);
1423				np->rx_skbuff[entry] = NULL;
1424				break;
1425			}
1426		}
1427		/* Perhaps we need not reset this field. */
1428		np->rx_ring[entry].frag[0].length =
1429			cpu_to_le32(np->rx_buf_sz | LastFrag);
1430		np->rx_ring[entry].status = 0;
1431		cnt++;
1432	}
1433}
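
/*
 * Handle the "uncommon event" interrupt sources.  On LinkChange the new
 * speed/duplex is taken from the autonegotiation result (ADVERTISE & LPA)
 * when autoneg is enabled, or from BMCR otherwise; if the link came up in
 * full duplex with flow control configured, the 0x0200 bit in
 * MulticastFilter1 and EnbFlowCtrl in MACCtrl0 are set to enable flow
 * control.  StatsMax triggers a statistics readout; IntrPCIErr is only
 * logged here.
 */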
1434static void netdev_error(struct net_device *dev, int intr_status)
1435{
1436	struct netdev_private *np = netdev_priv(dev);
1437	void __iomem *ioaddr = np->base;
1438	u16 mii_ctl, mii_advertise, mii_lpa;
1439	int speed;
1440
1441	if (intr_status & LinkChange) {
1442		if (mdio_wait_link(dev, 10) == 0) {
1443			printk(KERN_INFO "%s: Link up\n", dev->name);
1444			if (np->an_enable) {
1445				mii_advertise = mdio_read(dev, np->phys[0],
1446							   MII_ADVERTISE);
1447				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1448				mii_advertise &= mii_lpa;
1449				printk(KERN_INFO "%s: Link changed: ",
1450					dev->name);
1451				if (mii_advertise & ADVERTISE_100FULL) {
1452					np->speed = 100;
1453					printk("100Mbps, full duplex\n");
1454				} else if (mii_advertise & ADVERTISE_100HALF) {
1455					np->speed = 100;
1456					printk("100Mbps, half duplex\n");
1457				} else if (mii_advertise & ADVERTISE_10FULL) {
1458					np->speed = 10;
1459					printk("10Mbps, full duplex\n");
1460				} else if (mii_advertise & ADVERTISE_10HALF) {
1461					np->speed = 10;
1462					printk("10Mbps, half duplex\n");
1463				} else
1464					printk("\n");
1465
1466			} else {
1467				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1468				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1469				np->speed = speed;
1470				printk(KERN_INFO "%s: Link changed: %dMbps, ",
1471					dev->name, speed);
1472				printk("%s duplex.\n",
1473					(mii_ctl & BMCR_FULLDPLX) ?
1474						"full" : "half");
1475			}
1476			check_duplex(dev);
1477			if (np->flowctrl && np->mii_if.full_duplex) {
1478				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1479					ioaddr + MulticastFilter1+2);
1480				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1481					ioaddr + MACCtrl0);
1482			}
1483			netif_carrier_on(dev);
1484		} else {
1485			printk(KERN_INFO "%s: Link down\n", dev->name);
1486			netif_carrier_off(dev);
1487		}
1488	}
1489	if (intr_status & StatsMax) {
1490		get_stats(dev);
1491	}
1492	if (intr_status & IntrPCIErr) {
1493		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1494			   dev->name, intr_status);
1495		/* We must do a global reset of DMA to continue. */
1496	}
1497}
1498
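/*
 * Fold the chip's hardware statistics counters into dev->stats and the
 * driver-private np->xstats under statlock.  The counters are narrow
 * (mostly 8- and 16-bit reads), so they are accumulated here; the StatsMax
 * interrupt calls in before they can wrap.
 */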
1499static struct net_device_stats *get_stats(struct net_device *dev)
1500{
1501	struct netdev_private *np = netdev_priv(dev);
1502	void __iomem *ioaddr = np->base;
1503	unsigned long flags;
1504	u8 late_coll, single_coll, mult_coll;
1505
1506	spin_lock_irqsave(&np->statlock, flags);
1507	/* The chip only needs to report frames it silently dropped. */
1508	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1509	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1510	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1511	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1512
1513	mult_coll = ioread8(ioaddr + StatsMultiColl);
1514	np->xstats.tx_multiple_collisions += mult_coll;
1515	single_coll = ioread8(ioaddr + StatsOneColl);
1516	np->xstats.tx_single_collisions += single_coll;
1517	late_coll = ioread8(ioaddr + StatsLateColl);
1518	np->xstats.tx_late_collisions += late_coll;
1519	dev->stats.collisions += mult_coll
1520		+ single_coll
1521		+ late_coll;
1522
1523	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1524	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1525	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1526	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1527	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1528	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1529	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1530
1531	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1532	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1533	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1534	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1535
1536	spin_unlock_irqrestore(&np->statlock, flags);
1537
1538	return &dev->stats;
1539}
1540
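/*
 * Program the receive filter.  Promiscuous mode accepts everything; too
 * many multicast addresses (or IFF_ALLMULTI) falls back to
 * accept-all-multicast; otherwise a 64-bit hash filter is built, indexed
 * by the six most significant bits of the little-endian CRC of each
 * address.  With flow control active in full duplex, bit 0x0200 of
 * mc_filter[3] is kept set as well (see netdev_error()).
 */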
1541static void set_rx_mode(struct net_device *dev)
1542{
1543	struct netdev_private *np = netdev_priv(dev);
1544	void __iomem *ioaddr = np->base;
1545	u16 mc_filter[4];			/* Multicast hash filter */
1546	u32 rx_mode;
1547	int i;
1548
1549	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1550		memset(mc_filter, 0xff, sizeof(mc_filter));
1551		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1552	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1553		   (dev->flags & IFF_ALLMULTI)) {
1554		/* Too many to match, or accept all multicasts. */
1555		memset(mc_filter, 0xff, sizeof(mc_filter));
1556		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1557	} else if (!netdev_mc_empty(dev)) {
1558		struct netdev_hw_addr *ha;
1559		int bit;
1560		int index;
1561		int crc;
1562		memset (mc_filter, 0, sizeof (mc_filter));
1563		netdev_for_each_mc_addr(ha, dev) {
1564			crc = ether_crc_le(ETH_ALEN, ha->addr);
1565			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1566				if (crc & 0x80000000) index |= 1 << bit;
1567			mc_filter[index/16] |= (1 << (index % 16));
1568		}
1569		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1570	} else {
1571		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1572		return;
1573	}
1574	if (np->mii_if.full_duplex && np->flowctrl)
1575		mc_filter[3] |= 0x0200;
1576
1577	for (i = 0; i < 4; i++)
1578		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1579	iowrite8(rx_mode, ioaddr + RxMode);
1580}
1581
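/* Write dev->dev_addr to the StationAddr registers, 16 bits at a time,
   low byte first, matching the chip's little-endian register layout. */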
1582static int __set_mac_addr(struct net_device *dev)
1583{
1584	struct netdev_private *np = netdev_priv(dev);
1585	u16 addr16;
1586
1587	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1588	iowrite16(addr16, np->base + StationAddr);
1589	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1590	iowrite16(addr16, np->base + StationAddr+2);
1591	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1592	iowrite16(addr16, np->base + StationAddr+4);
1593	return 0;
1594}
1595
1596/* Invoked with rtnl_lock held */
1597static int sundance_set_mac_addr(struct net_device *dev, void *data)
1598{
1599	const struct sockaddr *addr = data;
1600
1601	if (!is_valid_ether_addr(addr->sa_data))
1602		return -EADDRNOTAVAIL;
1603	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1604	__set_mac_addr(dev);
1605
1606	return 0;
1607}
1608
1609static const struct {
1610	const char name[ETH_GSTRING_LEN];
1611} sundance_stats[] = {
1612	{ "tx_multiple_collisions" },
1613	{ "tx_single_collisions" },
1614	{ "tx_late_collisions" },
1615	{ "tx_deferred" },
1616	{ "tx_deferred_excessive" },
1617	{ "tx_aborted" },
1618	{ "tx_bcasts" },
1619	{ "rx_bcasts" },
1620	{ "tx_mcasts" },
1621	{ "rx_mcasts" },
1622};
1623
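/*
 * ethtool support.  check_if_running() is wired up as .begin in
 * ethtool_ops below, so every ethtool operation fails with -EINVAL unless
 * the interface is up.  The sundance_stats[] strings above name the extra
 * per-driver counters exposed through ethtool -S.
 */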
1624static int check_if_running(struct net_device *dev)
1625{
1626	if (!netif_running(dev))
1627		return -EINVAL;
1628	return 0;
1629}
1630
1631static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1632{
1633	struct netdev_private *np = netdev_priv(dev);
1634	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1635	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1636	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1637}
1638
1639static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1640{
1641	struct netdev_private *np = netdev_priv(dev);
1642	spin_lock_irq(&np->lock);
1643	mii_ethtool_gset(&np->mii_if, ecmd);
1644	spin_unlock_irq(&np->lock);
1645	return 0;
1646}
1647
1648static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1649{
1650	struct netdev_private *np = netdev_priv(dev);
1651	int res;
1652	spin_lock_irq(&np->lock);
1653	res = mii_ethtool_sset(&np->mii_if, ecmd);
1654	spin_unlock_irq(&np->lock);
1655	return res;
1656}
1657
1658static int nway_reset(struct net_device *dev)
1659{
1660	struct netdev_private *np = netdev_priv(dev);
1661	return mii_nway_restart(&np->mii_if);
1662}
1663
1664static u32 get_link(struct net_device *dev)
1665{
1666	struct netdev_private *np = netdev_priv(dev);
1667	return mii_link_ok(&np->mii_if);
1668}
1669
1670static u32 get_msglevel(struct net_device *dev)
1671{
1672	struct netdev_private *np = netdev_priv(dev);
1673	return np->msg_enable;
1674}
1675
1676static void set_msglevel(struct net_device *dev, u32 val)
1677{
1678	struct netdev_private *np = netdev_priv(dev);
1679	np->msg_enable = val;
1680}
1681
1682static void get_strings(struct net_device *dev, u32 stringset,
1683		u8 *data)
1684{
1685	if (stringset == ETH_SS_STATS)
1686		memcpy(data, sundance_stats, sizeof(sundance_stats));
1687}
1688
1689static int get_sset_count(struct net_device *dev, int sset)
1690{
1691	switch (sset) {
1692	case ETH_SS_STATS:
1693		return ARRAY_SIZE(sundance_stats);
1694	default:
1695		return -EOPNOTSUPP;
1696	}
1697}
1698
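/* Values must be emitted in exactly the order of the sundance_stats[]
   string table above, since ethtool pairs them up by index. */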
1699static void get_ethtool_stats(struct net_device *dev,
1700		struct ethtool_stats *stats, u64 *data)
1701{
1702	struct netdev_private *np = netdev_priv(dev);
1703	int i = 0;
1704
1705	get_stats(dev);
1706	data[i++] = np->xstats.tx_multiple_collisions;
1707	data[i++] = np->xstats.tx_single_collisions;
1708	data[i++] = np->xstats.tx_late_collisions;
1709	data[i++] = np->xstats.tx_deferred;
1710	data[i++] = np->xstats.tx_deferred_excessive;
1711	data[i++] = np->xstats.tx_aborted;
1712	data[i++] = np->xstats.tx_bcasts;
1713	data[i++] = np->xstats.rx_bcasts;
1714	data[i++] = np->xstats.tx_mcasts;
1715	data[i++] = np->xstats.rx_mcasts;
1716}
1717
1718static const struct ethtool_ops ethtool_ops = {
1719	.begin = check_if_running,
1720	.get_drvinfo = get_drvinfo,
1721	.get_settings = get_settings,
1722	.set_settings = set_settings,
1723	.nway_reset = nway_reset,
1724	.get_link = get_link,
1725	.get_msglevel = get_msglevel,
1726	.set_msglevel = set_msglevel,
1727	.get_strings = get_strings,
1728	.get_sset_count = get_sset_count,
1729	.get_ethtool_stats = get_ethtool_stats,
1730};
1731
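/* MII ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG) are handed to
   generic_mii_ioctl() under np->lock, and only while the interface is up. */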
1732static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1733{
1734	struct netdev_private *np = netdev_priv(dev);
1735	int rc;
1736
1737	if (!netif_running(dev))
1738		return -EINVAL;
1739
1740	spin_lock_irq(&np->lock);
1741	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1742	spin_unlock_irq(&np->lock);
1743
1744	return rc;
1745}
1746
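/*
 * Orderly shutdown: kill the Rx/Tx tasklets, stop the queue, mask all
 * interrupt sources, halt Rx/Tx DMA and poll DMACtrl until it goes idle,
 * issue a global reset, then free the IRQ, the timer and every skb still
 * sitting in the rings.
 */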
1747static int netdev_close(struct net_device *dev)
1748{
1749	struct netdev_private *np = netdev_priv(dev);
1750	void __iomem *ioaddr = np->base;
1751	struct sk_buff *skb;
1752	int i;
1753
1754	/* Wait and kill tasklet */
1755	tasklet_kill(&np->rx_tasklet);
1756	tasklet_kill(&np->tx_tasklet);
1757	np->cur_tx = 0;
1758	np->dirty_tx = 0;
1759	np->cur_task = 0;
1760	np->last_tx = NULL;
1761
1762	netif_stop_queue(dev);
1763
1764	if (netif_msg_ifdown(np)) {
1765		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1766			   "Rx %4.4x Int %2.2x.\n",
1767			   dev->name, ioread8(ioaddr + TxStatus),
1768			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1769		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1770			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1771	}
1772
1773	/* Disable interrupts by clearing the interrupt mask. */
1774	iowrite16(0x0000, ioaddr + IntrEnable);
1775
1776	/* Disable Rx and Tx DMA so resources can be released safely */
1777	iowrite32(0x500, ioaddr + DMACtrl);
1778
1779	/* Stop the chip's Tx and Rx processes. */
1780	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1781
1782	for (i = 2000; i > 0; i--) {
1783		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1784			break;
1785		mdelay(1);
1786	}
1787
1788	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1789		  ioaddr + ASIC_HI_WORD(ASICCtrl));
1790
1791	for (i = 2000; i > 0; i--) {
1792		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1793			break;
1794		mdelay(1);
1795	}
1796
1797#ifdef __i386__
1798	if (netif_msg_hw(np)) {
1799		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1800			   (int)(np->tx_ring_dma));
1801		for (i = 0; i < TX_RING_SIZE; i++)
1802			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1803				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1804				   np->tx_ring[i].frag[0].length);
1805		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1806			   (int)(np->rx_ring_dma));
1807		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1808			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1809				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1810				   np->rx_ring[i].frag[0].length);
1811		}
1812	}
1813#endif /* __i386__ debugging only */
1814
1815	free_irq(np->pci_dev->irq, dev);
1816
1817	del_timer_sync(&np->timer);
1818
1819	/* Free all the skbuffs in the Rx queue. */
1820	for (i = 0; i < RX_RING_SIZE; i++) {
1821		np->rx_ring[i].status = 0;
1822		skb = np->rx_skbuff[i];
1823		if (skb) {
1824			dma_unmap_single(&np->pci_dev->dev,
1825				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1826				np->rx_buf_sz, DMA_FROM_DEVICE);
1827			dev_kfree_skb(skb);
1828			np->rx_skbuff[i] = NULL;
1829		}
1830		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1831	}
1832	for (i = 0; i < TX_RING_SIZE; i++) {
1833		np->tx_ring[i].next_desc = 0;
1834		skb = np->tx_skbuff[i];
1835		if (skb) {
1836			dma_unmap_single(&np->pci_dev->dev,
1837				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1838				skb->len, DMA_TO_DEVICE);
1839			dev_kfree_skb(skb);
1840			np->tx_skbuff[i] = NULL;
1841		}
1842	}
1843
1844	return 0;
1845}
1846
1847static void __devexit sundance_remove1 (struct pci_dev *pdev)
1848{
1849	struct net_device *dev = pci_get_drvdata(pdev);
1850
1851	if (dev) {
1852	    struct netdev_private *np = netdev_priv(dev);
1853	    unregister_netdev(dev);
1854	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1855		    np->rx_ring, np->rx_ring_dma);
1856	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1857		    np->tx_ring, np->tx_ring_dma);
1858	    pci_iounmap(pdev, np->base);
1859	    pci_release_regions(pdev);
1860	    free_netdev(dev);
1861	    pci_set_drvdata(pdev, NULL);
1862	}
1863}
1864
1865#ifdef CONFIG_PM
1866
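/*
 * Legacy PCI power-management hooks: suspend closes the interface and puts
 * the device into the requested low-power state; resume restores PCI state,
 * reopens the interface and re-attaches it.  Both are no-ops while the
 * interface is down.
 */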
1867static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1868{
1869	struct net_device *dev = pci_get_drvdata(pci_dev);
1870
1871	if (!netif_running(dev))
1872		return 0;
1873
1874	netdev_close(dev);
1875	netif_device_detach(dev);
1876
1877	pci_save_state(pci_dev);
1878	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1879
1880	return 0;
1881}
1882
1883static int sundance_resume(struct pci_dev *pci_dev)
1884{
1885	struct net_device *dev = pci_get_drvdata(pci_dev);
1886	int err = 0;
1887
1888	if (!netif_running(dev))
1889		return 0;
1890
1891	pci_set_power_state(pci_dev, PCI_D0);
1892	pci_restore_state(pci_dev);
1893
1894	err = netdev_open(dev);
1895	if (err) {
1896		printk(KERN_ERR "%s: Can't resume interface!\n",
1897				dev->name);
1898		goto out;
1899	}
1900
1901	netif_device_attach(dev);
1902
1903out:
1904	return err;
1905}
1906
1907#endif /* CONFIG_PM */
1908
1909static struct pci_driver sundance_driver = {
1910	.name		= DRV_NAME,
1911	.id_table	= sundance_pci_tbl,
1912	.probe		= sundance_probe1,
1913	.remove		= __devexit_p(sundance_remove1),
1914#ifdef CONFIG_PM
1915	.suspend	= sundance_suspend,
1916	.resume		= sundance_resume,
1917#endif /* CONFIG_PM */
1918};
1919
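/* Module glue: init/exit simply register and unregister the PCI driver;
   the version banner is printed only for modular builds. */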
1920static int __init sundance_init(void)
1921{
1922/* when a module, this is printed whether or not devices are found in probe */
1923#ifdef MODULE
1924	printk(version);
1925#endif
1926	return pci_register_driver(&sundance_driver);
1927}
1928
1929static void __exit sundance_exit(void)
1930{
1931	pci_unregister_driver(&sundance_driver);
1932}
1933
1934module_init(sundance_init);
1935module_exit(sundance_exit);
1936
1937