   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
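/* For reference: with the 16-byte yellowfin_desc (four __le32 fields) and
   the 8-byte tx_status_words defined below, these work out to
   TX_TOTAL_SIZE = 2*16*16 = 512, RX_TOTAL_SIZE = 64*16 = 1024 and
   STATUS_TOTAL_SIZE = 16*8 = 128 bytes of DMA-coherent memory. */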
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <asm/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] __devinitconst =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS should preferably assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
 166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
 172
 173IIIc. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is
 178single-threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
 182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
 194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
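/* Illustrative sketch (not part of the driver): the RX_COPYBREAK scheme
   from section IIIa above.  Frames no longer than the breakpoint are
   copied into a freshly allocated skbuff; larger frames hand the original
   ring skbuff up the stack.  Compare the real logic in yellowfin_rx(). */
#if 0
static struct sk_buff *copybreak_sketch(struct sk_buff *ring_skb,
					int pkt_len, int copybreak)
{
	struct sk_buff *skb;

	if (pkt_len > copybreak)
		return ring_skb;		/* Pass the ring buffer up. */
	skb = dev_alloc_skb(pkt_len + 2);	/* Small frame: copy it. */
	if (skb == NULL)
		return NULL;
	skb_reserve(skb, 2);			/* 16 byte align the IP header. */
	skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
	skb_put(skb, pkt_len);
	return skb;
}
#endif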
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
 214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
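/* Note: in full-status mode the chip DMA-writes one of these 8-byte blocks
   after each transmitted frame; a non-zero tx_errs doubles as the
   "descriptor done" flag, which is why the Tx reap loop in
   yellowfin_interrupt() polls it. */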
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
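/* Illustrative only: composing a dbdma command word under the layout used
   throughout this driver -- the high nibble selects the command, the
   0x3F0000 region holds the branch/interrupt/wait conditions, and the low
   16 bits carry the buffer byte count (see yellowfin_start_xmit()). */
#if 0
	desc->dbdma_cmd = cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | 1514);
#endif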
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(unsigned long data);
 347static void yellowfin_tx_timeout(struct net_device *dev);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_multicast_list = set_rx_mode,
 363	.ndo_change_mtu		= eth_change_mtu,
 364	.ndo_validate_addr	= eth_validate_addr,
 365	.ndo_set_mac_address 	= eth_mac_addr,
 366	.ndo_do_ioctl 		= netdev_ioctl,
 367	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 368};
 369
 370static int __devinit yellowfin_init_one(struct pci_dev *pdev,
 371					const struct pci_device_id *ent)
 372{
 373	struct net_device *dev;
 374	struct yellowfin_private *np;
 375	int irq;
 376	int chip_idx = ent->driver_data;
 377	static int find_cnt;
 378	void __iomem *ioaddr;
 379	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 380	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 381        void *ring_space;
 382        dma_addr_t ring_dma;
 383#ifdef USE_IO_OPS
 384	int bar = 0;
 385#else
 386	int bar = 1;
 387#endif
 388
 389/* when built into the kernel, we only print version if device is found */
 390#ifndef MODULE
 391	static int printed_version;
 392	if (!printed_version++)
 393		printk(version);
 394#endif
 395
 396	i = pci_enable_device(pdev);
 397	if (i) return i;
 398
 399	dev = alloc_etherdev(sizeof(*np));
 400	if (!dev) {
 401		pr_err("cannot allocate ethernet device\n");
 402		return -ENOMEM;
 403	}
 404	SET_NETDEV_DEV(dev, &pdev->dev);
 405
 406	np = netdev_priv(dev);
 407
 408	if (pci_request_regions(pdev, DRV_NAME))
 409		goto err_out_free_netdev;
 410
 411	pci_set_master (pdev);
 412
 413	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 414	if (!ioaddr)
 415		goto err_out_free_res;
 416
 417	irq = pdev->irq;
 418
 419	if (drv_flags & DontUseEeprom)
 420		for (i = 0; i < 6; i++)
 421			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 422	else {
 423		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 424		for (i = 0; i < 6; i++)
 425			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 426	}
 427
 428	/* Reset the chip. */
 429	iowrite32(0x80000000, ioaddr + DMACtrl);
 430
 431	dev->base_addr = (unsigned long)ioaddr;
 432	dev->irq = irq;
 433
 434	pci_set_drvdata(pdev, dev);
 435	spin_lock_init(&np->lock);
 436
 437	np->pci_dev = pdev;
 438	np->chip_id = chip_idx;
 439	np->drv_flags = drv_flags;
 440	np->base = ioaddr;
 441
 442	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 443	if (!ring_space)
 444		goto err_out_cleardev;
 445	np->tx_ring = ring_space;
 446	np->tx_ring_dma = ring_dma;
 447
 448	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 449	if (!ring_space)
 450		goto err_out_unmap_tx;
 451	np->rx_ring = ring_space;
 452	np->rx_ring_dma = ring_dma;
 453
 454	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
 455	if (!ring_space)
 456		goto err_out_unmap_rx;
 457	np->tx_status = ring_space;
 458	np->tx_status_dma = ring_dma;
 459
 460	if (dev->mem_start)
 461		option = dev->mem_start;
 462
 463	/* The lower four bits are the media type. */
 464	if (option > 0) {
 465		if (option & 0x200)
 466			np->full_duplex = 1;
 467		np->default_port = option & 15;
 468		if (np->default_port)
 469			np->medialock = 1;
 470	}
 471	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 472		np->full_duplex = 1;
 473
 474	if (np->full_duplex)
 475		np->duplex_lock = 1;
 476
 477	/* The Yellowfin-specific entries in the device structure. */
 478	dev->netdev_ops = &netdev_ops;
 479	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 480	dev->watchdog_timeo = TX_TIMEOUT;
 481
 482	if (mtu)
 483		dev->mtu = mtu;
 484
 485	i = register_netdev(dev);
 486	if (i)
 487		goto err_out_unmap_status;
 488
 489	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 490		    pci_id_tbl[chip_idx].name,
 491		    ioread32(ioaddr + ChipRev), ioaddr,
 492		    dev->dev_addr, irq);
 493
 494	if (np->drv_flags & HasMII) {
 495		int phy, phy_idx = 0;
 496		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 497			int mii_status = mdio_read(ioaddr, phy, 1);
 498			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 499				np->phys[phy_idx++] = phy;
 500				np->advertising = mdio_read(ioaddr, phy, 4);
 501				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 502					    phy, mii_status, np->advertising);
 503			}
 504		}
 505		np->mii_cnt = phy_idx;
 506	}
 507
 508	find_cnt++;
 509
 510	return 0;
 511
 512err_out_unmap_status:
 513        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
 514		np->tx_status_dma);
 515err_out_unmap_rx:
 516        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 517err_out_unmap_tx:
 518        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 519err_out_cleardev:
 520	pci_set_drvdata(pdev, NULL);
 521	pci_iounmap(pdev, ioaddr);
 522err_out_free_res:
 523	pci_release_regions(pdev);
 524err_out_free_netdev:
 525	free_netdev (dev);
 526	return -ENODEV;
 527}
 528
 529static int __devinit read_eeprom(void __iomem *ioaddr, int location)
 530{
 531	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
 532
 533	iowrite8(location, ioaddr + EEAddr);
 534	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 535	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 536		;
 537	return ioread8(ioaddr + EERead);
 538}
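/* Note: the bounded busy-wait above (bit 0x80 of EEStatus clears when the
   read completes) is the same poll-with-timeout pattern the MDIO helpers
   below apply to bit 0 of MII_Status. */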
 539
 540/* MII Management Data I/O accesses.
 541   These routines assume the MDIO controller is idle, and do not exit until
 542   the command is finished. */
 543
 544static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 545{
 546	int i;
 547
 548	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 549	iowrite16(1, ioaddr + MII_Cmd);
 550	for (i = 10000; i >= 0; i--)
 551		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 552			break;
 553	return ioread16(ioaddr + MII_Rd_Data);
 554}
 555
 556static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 557{
 558	int i;
 559
 560	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 561	iowrite16(value, ioaddr + MII_Wr_Data);
 562
 563	/* Wait for the command to finish. */
 564	for (i = 10000; i >= 0; i--)
 565		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 566			break;
 567}
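/* Illustrative sketch (not part of the driver): a typical use of the two
   helpers above -- restarting autonegotiation by setting the enable and
   restart bits in the PHY control register.  MII_BMCR, BMCR_ANENABLE and
   BMCR_ANRESTART come from <linux/mii.h>, included above. */
#if 0
	int bmcr = mdio_read(ioaddr, phy_id, MII_BMCR);
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
#endif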
 568
 569
 570static int yellowfin_open(struct net_device *dev)
 571{
 572	struct yellowfin_private *yp = netdev_priv(dev);
 573	void __iomem *ioaddr = yp->base;
 574	int i, ret;
 575
 576	/* Reset the chip. */
 577	iowrite32(0x80000000, ioaddr + DMACtrl);
 578
 579	ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 580	if (ret)
 581		return ret;
 582
 583	if (yellowfin_debug > 1)
 584		netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
 585			      __func__, dev->irq);
 586
 587	ret = yellowfin_init_ring(dev);
 588	if (ret) {
 589		free_irq(dev->irq, dev);
 590		return ret;
 591	}
 592
 593	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 594	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 595
 596	for (i = 0; i < 6; i++)
 597		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 598
 599	/* Set up various condition 'select' registers.
 600	   There are no options here. */
 601	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 602	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 603	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 604	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 605	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 606	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 607
 608	/* Initialize other registers: with so many, this will eventually be
 609	   converted to an offset/value list. */
 610	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 611	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 612	/* Enable automatic generation of flow control frames, period 0xffff. */
 613	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 614
 615	yp->tx_threshold = 32;
 616	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 617
 618	if (dev->if_port == 0)
 619		dev->if_port = yp->default_port;
 620
 621	netif_start_queue(dev);
 622
 623	/* Setting the Rx mode will start the Rx process. */
 624	if (yp->drv_flags & IsGigabit) {
 625		/* We are always in full-duplex mode with gigabit! */
 626		yp->full_duplex = 1;
 627		iowrite16(0x01CF, ioaddr + Cnfg);
 628	} else {
 629		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 630		iowrite16(0x1018, ioaddr + FrameGap1);
 631		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 632	}
 633	set_rx_mode(dev);
 634
 635	/* Enable interrupts by setting the interrupt mask. */
 636	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 637	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 638	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 639	iowrite32(0x80008000, ioaddr + TxCtrl);
 640
 641	if (yellowfin_debug > 2) {
 642		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 643	}
 644
 645	/* Set the timer to check for link beat. */
 646	init_timer(&yp->timer);
 647	yp->timer.expires = jiffies + 3*HZ;
 648	yp->timer.data = (unsigned long)dev;
 649	yp->timer.function = yellowfin_timer;				/* timer handler */
 650	add_timer(&yp->timer);
 651
 652	return 0;
 653}
 654
 655static void yellowfin_timer(unsigned long data)
 656{
 657	struct net_device *dev = (struct net_device *)data;
 658	struct yellowfin_private *yp = netdev_priv(dev);
 659	void __iomem *ioaddr = yp->base;
 660	int next_tick = 60*HZ;
 661
 662	if (yellowfin_debug > 3) {
 663		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 664			      ioread16(ioaddr + IntrStatus));
 665	}
 666
 667	if (yp->mii_cnt) {
 668		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 669		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 670		int negotiated = lpa & yp->advertising;
 671		if (yellowfin_debug > 1)
 672			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 673				      yp->phys[0], bmsr, lpa);
 674
 675		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 676
 677		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 678
 679		if (bmsr & BMSR_LSTATUS)
 680			next_tick = 60*HZ;
 681		else
 682			next_tick = 3*HZ;
 683	}
 684
 685	yp->timer.expires = jiffies + next_tick;
 686	add_timer(&yp->timer);
 687}
 688
 689static void yellowfin_tx_timeout(struct net_device *dev)
 690{
 691	struct yellowfin_private *yp = netdev_priv(dev);
 692	void __iomem *ioaddr = yp->base;
 693
 694	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 695		    yp->cur_tx, yp->dirty_tx,
 696		    ioread32(ioaddr + TxStatus),
 697		    ioread32(ioaddr + RxStatus));
 698
 699	/* Note: these should be KERN_DEBUG. */
 700	if (yellowfin_debug) {
 701		int i;
 702		pr_warning("  Rx ring %p: ", yp->rx_ring);
 703		for (i = 0; i < RX_RING_SIZE; i++)
 704			pr_cont(" %08x", yp->rx_ring[i].result_status);
 705		pr_cont("\n");
 706		pr_warning("  Tx ring %p: ", yp->tx_ring);
 707		for (i = 0; i < TX_RING_SIZE; i++)
 708			pr_cont(" %04x /%08x",
 709			       yp->tx_status[i].tx_errs,
 710			       yp->tx_ring[i].result_status);
 711		pr_cont("\n");
 712	}
 713
 714	/* If the hardware is found to hang regularly, we will update the code
 715	   to reinitialize the chip here. */
 716	dev->if_port = 0;
 717
 718	/* Wake the potentially-idle transmit channel. */
 719	iowrite32(0x10001000, yp->base + TxCtrl);
 720	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 721		netif_wake_queue (dev);		/* Typical path */
 722
 723	dev->trans_start = jiffies; /* prevent tx timeout */
 724	dev->stats.tx_errors++;
 725}
 726
 727/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 728static int yellowfin_init_ring(struct net_device *dev)
 729{
 730	struct yellowfin_private *yp = netdev_priv(dev);
 731	int i, j;
 732
 733	yp->tx_full = 0;
 734	yp->cur_rx = yp->cur_tx = 0;
 735	yp->dirty_tx = 0;
 736
 737	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 738
 739	for (i = 0; i < RX_RING_SIZE; i++) {
 740		yp->rx_ring[i].dbdma_cmd =
 741			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 742		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 743			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 744	}
 745
 746	for (i = 0; i < RX_RING_SIZE; i++) {
 747		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 748		yp->rx_skbuff[i] = skb;
 749		if (skb == NULL)
 750			break;
 751		skb->dev = dev;		/* Mark as being used by this device. */
 752		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 753		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 754			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 755	}
 756	if (i != RX_RING_SIZE) {
 757		for (j = 0; j < i; j++)
 758			dev_kfree_skb(yp->rx_skbuff[j]);
 759		return -ENOMEM;
 760	}
 761	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 762	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 763
 764#define NO_TXSTATS
 765#ifdef NO_TXSTATS
 766	/* In this mode the Tx ring needs only a single descriptor. */
 767	for (i = 0; i < TX_RING_SIZE; i++) {
 768		yp->tx_skbuff[i] = NULL;
 769		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 770		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 771			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 772	}
 773	/* Wrap ring */
 774	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 775#else
 776{
 777	/* Tx ring needs a pair of descriptors, the second for the status. */
 778	for (i = 0; i < TX_RING_SIZE; i++) {
 779		j = 2*i;
 780		yp->tx_skbuff[i] = NULL;
 781		/* Branch on Tx error. */
 782		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 783		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 784			(j+1)*sizeof(struct yellowfin_desc));
 785		j++;
 786		if (yp->drv_flags & FullTxStatus) {
 787			yp->tx_ring[j].dbdma_cmd =
 788				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 789			/* The status byte count is carried in dbdma_cmd above. */
 790			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 791				i*sizeof(struct tx_status_words));
 792		} else {
 793			/* Symbios chips write only tx_errs word. */
 794			yp->tx_ring[j].dbdma_cmd =
 795				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 796			/* The two-byte count is carried in dbdma_cmd above. */
 797			/* Om pade ummmmm... */
 798			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 799				i*sizeof(struct tx_status_words) +
 800				offsetof(struct tx_status_words, tx_errs));
 802		}
 803		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 804			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 805	}
 806	/* Wrap ring */
 807	yp->tx_ring[j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 808}
 809#endif
 810	yp->tx_tail_desc = &yp->tx_status[0];
 811	return 0;
 812}
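/* For reference: a worked example of the ring wrap above with
   RX_RING_SIZE = 64 and the 16-byte descriptor -- entry 62's branch_addr
   is rx_ring_dma + 63*16, while entry 63's wraps to rx_ring_dma + 0, so
   the chip follows the branch chain around the ring with no special-case
   code. */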
 813
 814static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 815					struct net_device *dev)
 816{
 817	struct yellowfin_private *yp = netdev_priv(dev);
 818	unsigned entry;
 819	int len = skb->len;
 820
 821	netif_stop_queue (dev);
 822
 823	/* Note: Ordering is important here, set the field with the
 824	   "ownership" bit last, and only then increment cur_tx. */
 825
 826	/* Calculate the next Tx descriptor entry. */
 827	entry = yp->cur_tx % TX_RING_SIZE;
 828
 829	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 830		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 831		/* Fix GX chipset errata. */
 832		if (cacheline_end > 24  || cacheline_end == 0) {
 833			len = skb->len + 32 - cacheline_end + 1;
 834			if (skb_padto(skb, len)) {
 835				yp->tx_skbuff[entry] = NULL;
 836				netif_wake_queue(dev);
 837				return NETDEV_TX_OK;
 838			}
 839		}
 840	}
 841	yp->tx_skbuff[entry] = skb;
 842
 843#ifdef NO_TXSTATS
 844	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 845		skb->data, len, PCI_DMA_TODEVICE));
 846	yp->tx_ring[entry].result_status = 0;
 847	if (entry >= TX_RING_SIZE-1) {
 848		/* New stop command. */
 849		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 850		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 851			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 852	} else {
 853		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 854		yp->tx_ring[entry].dbdma_cmd =
 855			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 856	}
 857	yp->cur_tx++;
 858#else
 859	/* The length is carried in the dbdma_cmd written below. */
 860	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 861		skb->data, len, PCI_DMA_TODEVICE));
 862	/* The input_last (status-write) command is constant, but we must
 863	   rewrite the subsequent 'stop' command. */
 864
 865	yp->cur_tx++;
 866	{
 867		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 868		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 869	}
 870	/* Final step -- overwrite the old 'stop' command. */
 871
 872	yp->tx_ring[entry<<1].dbdma_cmd =
 873		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 874					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 875#endif
 876
 877	/* Non-x86 Todo: explicitly flush cache lines here. */
 878
 879	/* Wake the potentially-idle transmit channel. */
 880	iowrite32(0x10001000, yp->base + TxCtrl);
 881
 882	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 883		netif_start_queue (dev);		/* Typical path */
 884	else
 885		yp->tx_full = 1;
 886
 887	if (yellowfin_debug > 4) {
 888		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 889			      yp->cur_tx, entry);
 890	}
 891	return NETDEV_TX_OK;
 892}
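/* For reference: a worked example of the gx_fix padding above.  A frame
   whose last byte lands at offset 26 of a 32-byte cache line
   (cacheline_end == 26, inside the troublesome window) is padded to
   skb->len + 32 - 26 + 1 = skb->len + 7 bytes, pushing the buffer end
   past the cache-line boundary. */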
 893
 894/* The interrupt handler does all of the Rx thread work and cleans up
 895   after the Tx thread. */
 896static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 897{
 898	struct net_device *dev = dev_instance;
 899	struct yellowfin_private *yp;
 900	void __iomem *ioaddr;
 901	int boguscnt = max_interrupt_work;
 902	unsigned int handled = 0;
 903
 904	yp = netdev_priv(dev);
 905	ioaddr = yp->base;
 906
 907	spin_lock (&yp->lock);
 908
 909	do {
 910		u16 intr_status = ioread16(ioaddr + IntrClear);
 911
 912		if (yellowfin_debug > 4)
 913			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 914				      intr_status);
 915
 916		if (intr_status == 0)
 917			break;
 918		handled = 1;
 919
 920		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 921			yellowfin_rx(dev);
 922			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 923		}
 924
 925#ifdef NO_TXSTATS
 926		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 927			int entry = yp->dirty_tx % TX_RING_SIZE;
 928			struct sk_buff *skb;
 929
 930			if (yp->tx_ring[entry].result_status == 0)
 931				break;
 932			skb = yp->tx_skbuff[entry];
 933			dev->stats.tx_packets++;
 934			dev->stats.tx_bytes += skb->len;
 935			/* Free the original skb. */
 936			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
 937				skb->len, PCI_DMA_TODEVICE);
 938			dev_kfree_skb_irq(skb);
 939			yp->tx_skbuff[entry] = NULL;
 940		}
 941		if (yp->tx_full &&
 942		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 943			/* The ring is no longer full, clear tbusy. */
 944			yp->tx_full = 0;
 945			netif_wake_queue(dev);
 946		}
 947#else
 948		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 949			unsigned dirty_tx = yp->dirty_tx;
 950
 951			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 952				 dirty_tx++) {
 953				/* Todo: optimize this. */
 954				int entry = dirty_tx % TX_RING_SIZE;
 955				u16 tx_errs = yp->tx_status[entry].tx_errs;
 956				struct sk_buff *skb;
 957
 958#ifndef final_version
 959				if (yellowfin_debug > 5)
 960					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 961						      entry,
 962						      yp->tx_status[entry].tx_cnt,
 963						      yp->tx_status[entry].tx_errs,
 964						      yp->tx_status[entry].total_tx_cnt,
 965						      yp->tx_status[entry].paused);
 966#endif
 967				if (tx_errs == 0)
 968					break;	/* It still hasn't been Txed */
 969				skb = yp->tx_skbuff[entry];
 970				if (tx_errs & 0xF810) {
 971					/* There was a major error, log it. */
 972#ifndef final_version
 973					if (yellowfin_debug > 1)
 974						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 975							      tx_errs);
 976#endif
 977					dev->stats.tx_errors++;
 978					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 979					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 980					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 981					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 982				} else {
 983#ifndef final_version
 984					if (yellowfin_debug > 4)
 985						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 986							      tx_errs);
 987#endif
 988					dev->stats.tx_bytes += skb->len;
 989					dev->stats.collisions += tx_errs & 15;
 990					dev->stats.tx_packets++;
 991				}
 992				/* Free the original skb. */
 993				pci_unmap_single(yp->pci_dev,
 994					le32_to_cpu(yp->tx_ring[entry<<1].addr),
 995					skb->len, PCI_DMA_TODEVICE);
 996				dev_kfree_skb_irq(skb);
 997				yp->tx_skbuff[entry] = NULL;
 998				/* Mark status as empty. */
 999				yp->tx_status[entry].tx_errs = 0;
1000			}
1001
1002#ifndef final_version
1003			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1004				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1005					   dirty_tx, yp->cur_tx, yp->tx_full);
1006				dirty_tx += TX_RING_SIZE;
1007			}
1008#endif
1009
1010			if (yp->tx_full &&
1011			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1012				/* The ring is no longer full, clear tbusy. */
1013				yp->tx_full = 0;
1014				netif_wake_queue(dev);
1015			}
1016
1017			yp->dirty_tx = dirty_tx;
1018			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1019		}
1020#endif
1021
1022		/* Log errors and other uncommon events. */
1023		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1024			yellowfin_error(dev, intr_status);
1025
1026		if (--boguscnt < 0) {
1027			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1028				    intr_status);
1029			break;
1030		}
1031	} while (1);
1032
1033	if (yellowfin_debug > 3)
1034		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1035			      ioread16(ioaddr + IntrStatus));
1036
1037	spin_unlock (&yp->lock);
1038	return IRQ_RETVAL(handled);
1039}
1040
1041/* This routine is logically part of the interrupt handler, but separated
1042   for clarity and better register allocation. */
1043static int yellowfin_rx(struct net_device *dev)
1044{
1045	struct yellowfin_private *yp = netdev_priv(dev);
1046	int entry = yp->cur_rx % RX_RING_SIZE;
1047	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1048
1049	if (yellowfin_debug > 4) {
1050		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1051			   entry, yp->rx_ring[entry].result_status);
1052		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1053			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1054			   yp->rx_ring[entry].result_status);
1055	}
1056
1057	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1058	while (1) {
1059		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1060		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1061		s16 frame_status;
1062		u16 desc_status;
1063		int data_size;
1064		u8 *buf_addr;
1065
1066		if(!desc->result_status)
1067			break;
1068		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1069			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1070		desc_status = le32_to_cpu(desc->result_status) >> 16;
1071		buf_addr = rx_skb->data;
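		/* The low 16 bits of dbdma_cmd hold the posted buffer length
		   and result_status holds the residual byte count, so the
		   difference below is the number of bytes actually received. */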
1072		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1073			le32_to_cpu(desc->result_status)) & 0xffff;
1074		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1075		if (yellowfin_debug > 4)
1076			printk(KERN_DEBUG "  %s() status was %04x\n",
1077			       __func__, frame_status);
1078		if (--boguscnt < 0)
1079			break;
1080		if ( ! (desc_status & RX_EOP)) {
1081			if (data_size != 0)
1082				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1083					    desc_status, data_size);
1084			dev->stats.rx_length_errors++;
1085		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1086			/* There was an error. */
1087			if (yellowfin_debug > 3)
1088				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1089				       __func__, frame_status);
1090			dev->stats.rx_errors++;
1091			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1092			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1093			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1094			if (frame_status < 0) dev->stats.rx_dropped++;
1095		} else if ( !(yp->drv_flags & IsGigabit)  &&
1096				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1097			u8 status1 = buf_addr[data_size-2];
1098			u8 status2 = buf_addr[data_size-1];
1099			dev->stats.rx_errors++;
1100			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1101			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1102			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1103			if (status2 & 0x80) dev->stats.rx_dropped++;
1104#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1105		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1106			memcmp(buf_addr, dev->dev_addr, 6) != 0 &&
1107			memcmp(buf_addr,
1108				"\377\377\377\377\377\377", 6) != 0) {
1112			if (bogus_rx++ == 0)
1113				netdev_warn(dev, "Bad frame to %pM\n",
1114					    buf_addr);
1115#endif
1116		} else {
1117			struct sk_buff *skb;
1118			int pkt_len = data_size -
1119				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1120			/* To verify: Yellowfin Length should omit the CRC! */
1121
1122#ifndef final_version
1123			if (yellowfin_debug > 4)
1124				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1125				       __func__, pkt_len, data_size, boguscnt);
1126#endif
1127			/* Check if the packet is long enough to just pass up the skbuff
1128			   without copying to a properly sized skbuff. */
1129			if (pkt_len > rx_copybreak) {
1130				skb_put(skb = rx_skb, pkt_len);
1131				pci_unmap_single(yp->pci_dev,
1132					le32_to_cpu(yp->rx_ring[entry].addr),
1133					yp->rx_buf_sz,
1134					PCI_DMA_FROMDEVICE);
1135				yp->rx_skbuff[entry] = NULL;
1136			} else {
1137				skb = dev_alloc_skb(pkt_len + 2);
1138				if (skb == NULL)
1139					break;
1140				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1141				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1142				skb_put(skb, pkt_len);
1143				pci_dma_sync_single_for_device(yp->pci_dev,
1144								le32_to_cpu(desc->addr),
1145								yp->rx_buf_sz,
1146								PCI_DMA_FROMDEVICE);
1147			}
1148			skb->protocol = eth_type_trans(skb, dev);
1149			netif_rx(skb);
1150			dev->stats.rx_packets++;
1151			dev->stats.rx_bytes += pkt_len;
1152		}
1153		entry = (++yp->cur_rx) % RX_RING_SIZE;
1154	}
1155
1156	/* Refill the Rx ring buffers. */
1157	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1158		entry = yp->dirty_rx % RX_RING_SIZE;
1159		if (yp->rx_skbuff[entry] == NULL) {
1160			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
1161			if (skb == NULL)
1162				break;				/* Better luck next round. */
1163			yp->rx_skbuff[entry] = skb;
1164			skb->dev = dev;	/* Mark as being used by this device. */
1165			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1166			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1167				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1168		}
1169		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1170		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1171		if (entry != 0)
1172			yp->rx_ring[entry - 1].dbdma_cmd =
1173				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1174		else
1175			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1176				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1177							| yp->rx_buf_sz);
1178	}
1179
1180	return 0;
1181}
1182
1183static void yellowfin_error(struct net_device *dev, int intr_status)
1184{
1185	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1186	/* Hmmmmm, it's not clear what to do here. */
1187	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1188		dev->stats.tx_errors++;
1189	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1190		dev->stats.rx_errors++;
1191}
1192
1193static int yellowfin_close(struct net_device *dev)
1194{
1195	struct yellowfin_private *yp = netdev_priv(dev);
1196	void __iomem *ioaddr = yp->base;
1197	int i;
1198
1199	netif_stop_queue (dev);
1200
1201	if (yellowfin_debug > 1) {
1202		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1203			      ioread16(ioaddr + TxStatus),
1204			      ioread16(ioaddr + RxStatus),
1205			      ioread16(ioaddr + IntrStatus));
1206		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1207			      yp->cur_tx, yp->dirty_tx,
1208			      yp->cur_rx, yp->dirty_rx);
1209	}
1210
1211	/* Disable interrupts by clearing the interrupt mask. */
1212	iowrite16(0x0000, ioaddr + IntrEnb);
1213
1214	/* Stop the chip's Tx and Rx processes. */
1215	iowrite32(0x80000000, ioaddr + RxCtrl);
1216	iowrite32(0x80000000, ioaddr + TxCtrl);
1217
1218	del_timer(&yp->timer);
1219
1220#if defined(__i386__)
1221	if (yellowfin_debug > 2) {
1222		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1223				(unsigned long long)yp->tx_ring_dma);
1224		for (i = 0; i < TX_RING_SIZE*2; i++)
1225			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1226				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1227				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1228				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1229		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1230		for (i = 0; i < TX_RING_SIZE; i++)
1231			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1232				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1233				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1234
1235		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1236				(unsigned long long)yp->rx_ring_dma);
1237		for (i = 0; i < RX_RING_SIZE; i++) {
1238			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1239				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1240				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1241				   yp->rx_ring[i].result_status);
1242			if (yellowfin_debug > 6) {
1243				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1244					int j;
1245
1246					printk(KERN_DEBUG);
1247					for (j = 0; j < 0x50; j++)
1248						pr_cont(" %04x",
1249							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1250					pr_cont("\n");
1251				}
1252			}
1253		}
1254	}
1255#endif /* __i386__ debugging only */
1256
1257	free_irq(dev->irq, dev);
1258
1259	/* Free all the skbuffs in the Rx queue. */
1260	for (i = 0; i < RX_RING_SIZE; i++) {
1261		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1262		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1263		if (yp->rx_skbuff[i]) {
1264			dev_kfree_skb(yp->rx_skbuff[i]);
1265		}
1266		yp->rx_skbuff[i] = NULL;
1267	}
1268	for (i = 0; i < TX_RING_SIZE; i++) {
1269		if (yp->tx_skbuff[i])
1270			dev_kfree_skb(yp->tx_skbuff[i]);
1271		yp->tx_skbuff[i] = NULL;
1272	}
1273
1274#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1275	if (yellowfin_debug > 0) {
1276		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1277			      bogus_rx);
1278	}
1279#endif
1280
1281	return 0;
1282}
1283
1284/* Set or clear the multicast filter for this adaptor. */
1285
1286static void set_rx_mode(struct net_device *dev)
1287{
1288	struct yellowfin_private *yp = netdev_priv(dev);
1289	void __iomem *ioaddr = yp->base;
1290	u16 cfg_value = ioread16(ioaddr + Cnfg);
1291
1292	/* Stop the Rx process to change any value. */
1293	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1294	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1295		iowrite16(0x000F, ioaddr + AddrMode);
1296	} else if ((netdev_mc_count(dev) > 64) ||
1297		   (dev->flags & IFF_ALLMULTI)) {
1298		/* Too many to filter well, or accept all multicasts. */
1299		iowrite16(0x000B, ioaddr + AddrMode);
1300	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1301		struct netdev_hw_addr *ha;
1302		u16 hash_table[4];
1303		int i;
1304
1305		memset(hash_table, 0, sizeof(hash_table));
1306		netdev_for_each_mc_addr(ha, dev) {
1307			unsigned int bit;
1308
1309			/* Due to a bug in the early chip versions, multiple filter
1310			   slots must be set for each address. */
1311			if (yp->drv_flags & HasMulticastBug) {
1312				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1313				hash_table[bit >> 4] |= (1 << (bit & 15));
1314				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1315				hash_table[bit >> 4] |= (1 << (bit & 15));
1316				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1317				hash_table[bit >> 4] |= (1 << (bit & 15));
1318			}
1319			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1320			hash_table[bit >> 4] |= (1 << (bit & 15));
1321		}
1322		/* Copy the hash table to the chip. */
1323		for (i = 0; i < 4; i++)
1324			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1325		iowrite16(0x0003, ioaddr + AddrMode);
1326	} else {					/* Normal, unicast/broadcast-only mode. */
1327		iowrite16(0x0001, ioaddr + AddrMode);
1328	}
1329	/* Restart the Rx process. */
1330	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1331}
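/* Illustrative only: the hash placement used above.  Bits 3..8 of the
   little-endian CRC over the full 6-byte address select one of 64 filter
   bits, spread across the four 16-bit HashTbl registers. */
#if 0
	bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
	hash_table[bit >> 4] |= (1 << (bit & 15));
#endif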
1332
1333static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1334{
1335	struct yellowfin_private *np = netdev_priv(dev);
1336	strcpy(info->driver, DRV_NAME);
1337	strcpy(info->version, DRV_VERSION);
1338	strcpy(info->bus_info, pci_name(np->pci_dev));
1339}
1340
1341static const struct ethtool_ops ethtool_ops = {
1342	.get_drvinfo = yellowfin_get_drvinfo
1343};
1344
1345static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1346{
1347	struct yellowfin_private *np = netdev_priv(dev);
1348	void __iomem *ioaddr = np->base;
1349	struct mii_ioctl_data *data = if_mii(rq);
1350
1351	switch(cmd) {
1352	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1353		data->phy_id = np->phys[0] & 0x1f;
1354		/* Fall Through */
1355
1356	case SIOCGMIIREG:		/* Read MII PHY register. */
1357		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1358		return 0;
1359
1360	case SIOCSMIIREG:		/* Write MII PHY register. */
1361		if (data->phy_id == np->phys[0]) {
1362			u16 value = data->val_in;
1363			switch (data->reg_num) {
1364			case 0:
1365				/* Check for autonegotiation on or reset. */
1366				np->medialock = (value & 0x9000) ? 0 : 1;
1367				if (np->medialock)
1368					np->full_duplex = (value & 0x0100) ? 1 : 0;
1369				break;
1370			case 4: np->advertising = value; break;
1371			}
1372			/* Perhaps check_duplex(dev), depending on chip semantics. */
1373		}
1374		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1375		return 0;
1376	default:
1377		return -EOPNOTSUPP;
1378	}
1379}
1380
1381
1382static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1383{
1384	struct net_device *dev = pci_get_drvdata(pdev);
1385	struct yellowfin_private *np;
1386
1387	BUG_ON(!dev);
1388	np = netdev_priv(dev);
1389
1390        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1391		np->tx_status_dma);
1392	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1393	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1394	unregister_netdev (dev);
1395
1396	pci_iounmap(pdev, np->base);
1397
1398	pci_release_regions (pdev);
1399
1400	free_netdev (dev);
1401	pci_set_drvdata(pdev, NULL);
1402}
1403
1404
1405static struct pci_driver yellowfin_driver = {
1406	.name		= DRV_NAME,
1407	.id_table	= yellowfin_pci_tbl,
1408	.probe		= yellowfin_init_one,
1409	.remove		= __devexit_p(yellowfin_remove_one),
1410};
1411
1412
1413static int __init yellowfin_init (void)
1414{
1415/* when a module, this is printed whether or not devices are found in probe */
1416#ifdef MODULE
1417	printk(version);
1418#endif
1419	return pci_register_driver(&yellowfin_driver);
1420}
1421
1422
1423static void __exit yellowfin_cleanup (void)
1424{
1425	pci_unregister_driver (&yellowfin_driver);
1426}
1427
1428
1429module_init(yellowfin_init);
1430module_exit(yellowfin_cleanup);