   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	(TX_RING_SIZE*sizeof(struct tx_status_words))
  77#define TX_TOTAL_SIZE		(2*TX_RING_SIZE*sizeof(struct yellowfin_desc))
  78#define RX_TOTAL_SIZE		(RX_RING_SIZE*sizeof(struct yellowfin_desc))
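/* Note: TX_TOTAL_SIZE covers two descriptors per Tx entry; the second
   descriptor of each pair is reserved for the status write-back used when
   full Tx status reporting is enabled (see yellowfin_init_ring()). */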
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
 166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
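As an illustration, the default rx_copybreak of 0 passes every received frame
up in its original ring skbuff and allocates a fresh one to refill the ring,
while a value of, say, 256 would instead copy frames of up to 256 bytes into a
right-sized skbuff and leave the ring buffer in place (see yellowfin_rx()).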
 172
 173IIIb. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
 194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
 214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
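/* The low 16 bits of dbdma_cmd carry the buffer or packet length; the
   command, branch, interrupt and wait bits above occupy the upper bits
   (see yellowfin_init_ring() and yellowfin_start_xmit()). */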
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(struct timer_list *t);
 347static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_eth_ioctl		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 380        void *ring_space;
 381        dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387	u8 addr[ETH_ALEN];
 388
 389/* when built into the kernel, we only print version if device is found */
 390#ifndef MODULE
 391	static int printed_version;
 392	if (!printed_version++)
 393		printk(version);
 394#endif
 395
 396	i = pci_enable_device(pdev);
 397	if (i) return i;
 398
 399	dev = alloc_etherdev(sizeof(*np));
 400	if (!dev)
 401		return -ENOMEM;
 402
 403	SET_NETDEV_DEV(dev, &pdev->dev);
 404
 405	np = netdev_priv(dev);
 406
 407	if (pci_request_regions(pdev, DRV_NAME))
 408		goto err_out_free_netdev;
 409
 410	pci_set_master (pdev);
 411
 412	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 413	if (!ioaddr)
 414		goto err_out_free_res;
 415
 416	irq = pdev->irq;
 417
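	/* Obtain the station address.  Boards flagged DontUseEeprom read it
	   back from the StnAddr registers (presumably left there by the boot
	   firmware); the others fetch it from the serial EEPROM, first
	   checking whether it lives at offset 0 or 0x100. */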
 418	if (drv_flags & DontUseEeprom)
 419		for (i = 0; i < 6; i++)
 420			addr[i] = ioread8(ioaddr + StnAddr + i);
 421	else {
 422		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 423		for (i = 0; i < 6; i++)
 424			addr[i] = read_eeprom(ioaddr, ee_offset + i);
 425	}
 426	eth_hw_addr_set(dev, addr);
 427
 428	/* Reset the chip. */
 429	iowrite32(0x80000000, ioaddr + DMACtrl);
 430
 431	pci_set_drvdata(pdev, dev);
 432	spin_lock_init(&np->lock);
 433
 434	np->pci_dev = pdev;
 435	np->chip_id = chip_idx;
 436	np->drv_flags = drv_flags;
 437	np->base = ioaddr;
 438
 439	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
 440					GFP_KERNEL);
 441	if (!ring_space)
 442		goto err_out_cleardev;
 443	np->tx_ring = ring_space;
 444	np->tx_ring_dma = ring_dma;
 445
 446	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
 447					GFP_KERNEL);
 448	if (!ring_space)
 449		goto err_out_unmap_tx;
 450	np->rx_ring = ring_space;
 451	np->rx_ring_dma = ring_dma;
 452
 453	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
 454					&ring_dma, GFP_KERNEL);
 455	if (!ring_space)
 456		goto err_out_unmap_rx;
 457	np->tx_status = ring_space;
 458	np->tx_status_dma = ring_dma;
 459
 460	if (dev->mem_start)
 461		option = dev->mem_start;
 462
 463	/* The lower four bits are the media type. */
 464	if (option > 0) {
 465		if (option & 0x200)
 466			np->full_duplex = 1;
 467		np->default_port = option & 15;
 468		if (np->default_port)
 469			np->medialock = 1;
 470	}
 471	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 472		np->full_duplex = 1;
 473
 474	if (np->full_duplex)
 475		np->duplex_lock = 1;
 476
 477	/* The Yellowfin-specific entries in the device structure. */
 478	dev->netdev_ops = &netdev_ops;
 479	dev->ethtool_ops = &ethtool_ops;
 480	dev->watchdog_timeo = TX_TIMEOUT;
 481
 482	if (mtu)
 483		dev->mtu = mtu;
 484
 485	i = register_netdev(dev);
 486	if (i)
 487		goto err_out_unmap_status;
 488
 489	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 490		    pci_id_tbl[chip_idx].name,
 491		    ioread32(ioaddr + ChipRev), ioaddr,
 492		    dev->dev_addr, irq);
 493
 494	if (np->drv_flags & HasMII) {
 495		int phy, phy_idx = 0;
 496		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 497			int mii_status = mdio_read(ioaddr, phy, 1);
 498			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 499				np->phys[phy_idx++] = phy;
 500				np->advertising = mdio_read(ioaddr, phy, 4);
 501				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 502					    phy, mii_status, np->advertising);
 503			}
 504		}
 505		np->mii_cnt = phy_idx;
 506	}
 507
 508	find_cnt++;
 509
 510	return 0;
 511
 512err_out_unmap_status:
 513	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
 514			  np->tx_status_dma);
 515err_out_unmap_rx:
 516	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
 517			  np->rx_ring_dma);
 518err_out_unmap_tx:
 519	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
 520			  np->tx_ring_dma);
 521err_out_cleardev:
 522	pci_iounmap(pdev, ioaddr);
 523err_out_free_res:
 524	pci_release_regions(pdev);
 525err_out_free_netdev:
 526	free_netdev (dev);
 527	return -ENODEV;
 528}
 529
 530static int read_eeprom(void __iomem *ioaddr, int location)
 531{
 532	int bogus_cnt = 10000;		/* Typical 33MHz: 1050 ticks */
 533
 534	iowrite8(location, ioaddr + EEAddr);
 535	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 536	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 537		;
 538	return ioread8(ioaddr + EERead);
 539}
 540
 541/* MII Management Data I/O accesses.
 542   These routines assume the MDIO controller is idle, and do not exit until
 543   the command is finished. */
 544
 545static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 546{
 547	int i;
 548
 549	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 550	iowrite16(1, ioaddr + MII_Cmd);
 551	for (i = 10000; i >= 0; i--)
 552		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 553			break;
 554	return ioread16(ioaddr + MII_Rd_Data);
 555}
 556
 557static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 558{
 559	int i;
 560
 561	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 562	iowrite16(value, ioaddr + MII_Wr_Data);
 563
 564	/* Wait for the command to finish. */
 565	for (i = 10000; i >= 0; i--)
 566		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 567			break;
 568}
 569
 570
 571static int yellowfin_open(struct net_device *dev)
 572{
 573	struct yellowfin_private *yp = netdev_priv(dev);
 574	const int irq = yp->pci_dev->irq;
 575	void __iomem *ioaddr = yp->base;
 576	int i, rc;
 577
 578	/* Reset the chip. */
 579	iowrite32(0x80000000, ioaddr + DMACtrl);
 580
 581	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 582	if (rc)
 583		return rc;
 584
 585	rc = yellowfin_init_ring(dev);
 586	if (rc < 0)
 587		goto err_free_irq;
 588
 589	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 590	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 591
 592	for (i = 0; i < 6; i++)
 593		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 594
 595	/* Set up various condition 'select' registers.
 596	   There are no options here. */
 597	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 598	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 599	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 600	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 601	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 602	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 603
 604	/* Initialize other registers: with so many, this will eventually be
 605	   converted to an offset/value list. */
 606	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 607	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 608	/* Enable automatic generation of flow control frames, period 0xffff. */
 609	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 610
 611	yp->tx_threshold = 32;
 612	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 613
 614	if (dev->if_port == 0)
 615		dev->if_port = yp->default_port;
 616
 617	netif_start_queue(dev);
 618
 619	/* Setting the Rx mode will start the Rx process. */
 620	if (yp->drv_flags & IsGigabit) {
 621		/* We are always in full-duplex mode with gigabit! */
 622		yp->full_duplex = 1;
 623		iowrite16(0x01CF, ioaddr + Cnfg);
 624	} else {
 625		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 626		iowrite16(0x1018, ioaddr + FrameGap1);
 627		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 628	}
 629	set_rx_mode(dev);
 630
 631	/* Enable interrupts by setting the interrupt mask. */
 632	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 633	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 634	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 635	iowrite32(0x80008000, ioaddr + TxCtrl);
 636
 637	if (yellowfin_debug > 2) {
 638		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 639	}
 640
 641	/* Set the timer to check for link beat. */
 642	timer_setup(&yp->timer, yellowfin_timer, 0);
 643	yp->timer.expires = jiffies + 3*HZ;
 644	add_timer(&yp->timer);
 645out:
 646	return rc;
 647
 648err_free_irq:
 649	free_irq(irq, dev);
 650	goto out;
 651}
 652
 653static void yellowfin_timer(struct timer_list *t)
 654{
 655	struct yellowfin_private *yp = from_timer(yp, t, timer);
 656	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
 657	void __iomem *ioaddr = yp->base;
 658	int next_tick = 60*HZ;
 659
 660	if (yellowfin_debug > 3) {
 661		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 662			      ioread16(ioaddr + IntrStatus));
 663	}
 664
 665	if (yp->mii_cnt) {
 666		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 667		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 668		int negotiated = lpa & yp->advertising;
 669		if (yellowfin_debug > 1)
 670			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 671				      yp->phys[0], bmsr, lpa);
 672
 673		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 674
 675		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 676
 677		if (bmsr & BMSR_LSTATUS)
 678			next_tick = 60*HZ;
 679		else
 680			next_tick = 3*HZ;
 681	}
 682
 683	yp->timer.expires = jiffies + next_tick;
 684	add_timer(&yp->timer);
 685}
 686
 687static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
 688{
 689	struct yellowfin_private *yp = netdev_priv(dev);
 690	void __iomem *ioaddr = yp->base;
 691
 692	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 693		    yp->cur_tx, yp->dirty_tx,
 694		    ioread32(ioaddr + TxStatus),
 695		    ioread32(ioaddr + RxStatus));
 696
 697	/* Note: these should be KERN_DEBUG. */
 698	if (yellowfin_debug) {
 699		int i;
 700		pr_warn("  Rx ring %p: ", yp->rx_ring);
 701		for (i = 0; i < RX_RING_SIZE; i++)
 702			pr_cont(" %08x", yp->rx_ring[i].result_status);
 703		pr_cont("\n");
 704		pr_warn("  Tx ring %p: ", yp->tx_ring);
 705		for (i = 0; i < TX_RING_SIZE; i++)
 706			pr_cont(" %04x /%08x",
 707			       yp->tx_status[i].tx_errs,
 708			       yp->tx_ring[i].result_status);
 709		pr_cont("\n");
 710	}
 711
 712	/* If the hardware is found to hang regularly, we will update the code
 713	   to reinitialize the chip here. */
 714	dev->if_port = 0;
 715
 716	/* Wake the potentially-idle transmit channel. */
 717	iowrite32(0x10001000, yp->base + TxCtrl);
 718	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 719		netif_wake_queue (dev);		/* Typical path */
 720
 721	netif_trans_update(dev); /* prevent tx timeout */
 722	dev->stats.tx_errors++;
 723}
 724
 725/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 726static int yellowfin_init_ring(struct net_device *dev)
 727{
 728	struct yellowfin_private *yp = netdev_priv(dev);
 729	int i, j;
 730
 731	yp->tx_full = 0;
 732	yp->cur_rx = yp->cur_tx = 0;
 733	yp->dirty_tx = 0;
 734
 735	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 736
 737	for (i = 0; i < RX_RING_SIZE; i++) {
 738		yp->rx_ring[i].dbdma_cmd =
 739			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 740		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 741			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 742	}
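	/* The loop above chains each descriptor's branch_addr to the next
	   entry, wrapping the last one back to the first to form the Rx ring
	   described in the Theory of Operation. */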
 743
 744	for (i = 0; i < RX_RING_SIZE; i++) {
 745		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 746		yp->rx_skbuff[i] = skb;
 747		if (skb == NULL)
 748			break;
 749		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 750		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 751								 skb->data,
 752								 yp->rx_buf_sz,
 753								 DMA_FROM_DEVICE));
 754	}
 755	if (i != RX_RING_SIZE) {
 756		for (j = 0; j < i; j++)
 757			dev_kfree_skb(yp->rx_skbuff[j]);
 758		return -ENOMEM;
 759	}
 760	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 761	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 762
 763#define NO_TXSTATS
 764#ifdef NO_TXSTATS
 765	/* In this mode the Tx ring needs only a single descriptor. */
 766	for (i = 0; i < TX_RING_SIZE; i++) {
 767		yp->tx_skbuff[i] = NULL;
 768		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 769		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 770			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 771	}
 772	/* Wrap ring */
 773	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 774#else
 775{
 776	/* Tx ring needs a pair of descriptors, the second for the status. */
 777	for (i = 0; i < TX_RING_SIZE; i++) {
 778		j = 2*i;
 779		yp->tx_skbuff[i] = 0;
 780		/* Branch on Tx error. */
 781		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 782		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 783			(j+1)*sizeof(struct yellowfin_desc));
 784		j++;
 785		if (yp->flags & FullTxStatus) {
 786			yp->tx_ring[j].dbdma_cmd =
 787				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 788			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 789			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 790				i*sizeof(struct tx_status_words));
 791		} else {
 792			/* Symbios chips write only tx_errs word. */
 793			yp->tx_ring[j].dbdma_cmd =
 794				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 795			yp->tx_ring[j].request_cnt = 2;
 796			/* Om pade ummmmm... */
 797			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 798				i*sizeof(struct tx_status_words) +
 799				&(yp->tx_status[0].tx_errs) -
 800				&(yp->tx_status[0]));
 801		}
 802		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 803			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 804	}
 805	/* Wrap ring */
 806	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 807}
 808#endif
 809	yp->tx_tail_desc = &yp->tx_status[0];
 810	return 0;
 811}
 812
 813static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 814					struct net_device *dev)
 815{
 816	struct yellowfin_private *yp = netdev_priv(dev);
 817	unsigned entry;
 818	int len = skb->len;
 819
 820	netif_stop_queue (dev);
 821
 822	/* Note: Ordering is important here, set the field with the
 823	   "ownership" bit last, and only then increment cur_tx. */
 824
 825	/* Calculate the next Tx descriptor entry. */
 826	entry = yp->cur_tx % TX_RING_SIZE;
 827
 828	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 829		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 830		/* Fix GX chipset errata. */
 831		if (cacheline_end > 24  || cacheline_end == 0) {
 832			len = skb->len + 32 - cacheline_end + 1;
 833			if (skb_padto(skb, len)) {
 834				yp->tx_skbuff[entry] = NULL;
 835				netif_wake_queue(dev);
 836				return NETDEV_TX_OK;
 837			}
 838		}
 839	}
 840	yp->tx_skbuff[entry] = skb;
 841
 842#ifdef NO_TXSTATS
 843	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 844							     skb->data,
 845							     len, DMA_TO_DEVICE));
 846	yp->tx_ring[entry].result_status = 0;
 847	if (entry >= TX_RING_SIZE-1) {
 848		/* New stop command. */
 849		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 850		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 851			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 852	} else {
 853		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 854		yp->tx_ring[entry].dbdma_cmd =
 855			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 856	}
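	/* Note the ordering above: the slot following this packet is marked
	   CMD_STOP before the packet's own command is written, so the DMA
	   engine always halts at the end of the queued work. */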
 857	yp->cur_tx++;
 858#else
 859	yp->tx_ring[entry<<1].request_cnt = len;
 860	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 861								skb->data,
 862								len, DMA_TO_DEVICE));
 863	/* The input_last (status-write) command is constant, but we must
 864	   rewrite the subsequent 'stop' command. */
 865
 866	yp->cur_tx++;
 867	{
 868		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 869		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 870	}
 871	/* Final step -- overwrite the old 'stop' command. */
 872
 873	yp->tx_ring[entry<<1].dbdma_cmd =
 874		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 875					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 876#endif
 877
 878	/* Non-x86 Todo: explicitly flush cache lines here. */
 879
 880	/* Wake the potentially-idle transmit channel. */
 881	iowrite32(0x10001000, yp->base + TxCtrl);
 882
 883	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 884		netif_start_queue (dev);		/* Typical path */
 885	else
 886		yp->tx_full = 1;
 887
 888	if (yellowfin_debug > 4) {
 889		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 890			      yp->cur_tx, entry);
 891	}
 892	return NETDEV_TX_OK;
 893}
 894
 895/* The interrupt handler does all of the Rx thread work and cleans up
 896   after the Tx thread. */
 897static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 898{
 899	struct net_device *dev = dev_instance;
 900	struct yellowfin_private *yp;
 901	void __iomem *ioaddr;
 902	int boguscnt = max_interrupt_work;
 903	unsigned int handled = 0;
 904
 905	yp = netdev_priv(dev);
 906	ioaddr = yp->base;
 907
 908	spin_lock (&yp->lock);
 909
 910	do {
 911		u16 intr_status = ioread16(ioaddr + IntrClear);
 912
 913		if (yellowfin_debug > 4)
 914			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 915				      intr_status);
 916
 917		if (intr_status == 0)
 918			break;
 919		handled = 1;
 920
 921		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 922			yellowfin_rx(dev);
 923			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 924		}
 925
 926#ifdef NO_TXSTATS
 927		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 928			int entry = yp->dirty_tx % TX_RING_SIZE;
 929			struct sk_buff *skb;
 930
 931			if (yp->tx_ring[entry].result_status == 0)
 932				break;
 933			skb = yp->tx_skbuff[entry];
 934			dev->stats.tx_packets++;
 935			dev->stats.tx_bytes += skb->len;
 936			/* Free the original skb. */
 937			dma_unmap_single(&yp->pci_dev->dev,
 938					 le32_to_cpu(yp->tx_ring[entry].addr),
 939					 skb->len, DMA_TO_DEVICE);
 940			dev_consume_skb_irq(skb);
 941			yp->tx_skbuff[entry] = NULL;
 942		}
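		/* Wake the queue only once several slots are free again,
		   providing a little hysteresis before transmission resumes. */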
 943		if (yp->tx_full &&
 944		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 945			/* The ring is no longer full, clear tbusy. */
 946			yp->tx_full = 0;
 947			netif_wake_queue(dev);
 948		}
 949#else
 950		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 951			unsigned dirty_tx = yp->dirty_tx;
 952
 953			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 954				 dirty_tx++) {
 955				/* Todo: optimize this. */
 956				int entry = dirty_tx % TX_RING_SIZE;
 957				u16 tx_errs = yp->tx_status[entry].tx_errs;
 958				struct sk_buff *skb;
 959
 960#ifndef final_version
 961				if (yellowfin_debug > 5)
 962					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 963						      entry,
 964						      yp->tx_status[entry].tx_cnt,
 965						      yp->tx_status[entry].tx_errs,
 966						      yp->tx_status[entry].total_tx_cnt,
 967						      yp->tx_status[entry].paused);
 968#endif
 969				if (tx_errs == 0)
 970					break;	/* It still hasn't been Txed */
 971				skb = yp->tx_skbuff[entry];
 972				if (tx_errs & 0xF810) {
 973					/* There was a major error, log it. */
 974#ifndef final_version
 975					if (yellowfin_debug > 1)
 976						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 977							      tx_errs);
 978#endif
 979					dev->stats.tx_errors++;
 980					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 981					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 982					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 983					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 984				} else {
 985#ifndef final_version
 986					if (yellowfin_debug > 4)
 987						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 988							      tx_errs);
 989#endif
 990					dev->stats.tx_bytes += skb->len;
 991					dev->stats.collisions += tx_errs & 15;
 992					dev->stats.tx_packets++;
 993				}
 994				/* Free the original skb. */
 995				dma_unmap_single(&yp->pci_dev->dev,
 996						 yp->tx_ring[entry << 1].addr,
 997						 skb->len, DMA_TO_DEVICE);
 998				dev_consume_skb_irq(skb);
 999				yp->tx_skbuff[entry] = 0;
1000				/* Mark status as empty. */
1001				yp->tx_status[entry].tx_errs = 0;
1002			}
1003
1004#ifndef final_version
1005			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1006				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1007					   dirty_tx, yp->cur_tx, yp->tx_full);
1008				dirty_tx += TX_RING_SIZE;
1009			}
1010#endif
1011
1012			if (yp->tx_full &&
1013			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1014				/* The ring is no longer full, clear tbusy. */
1015				yp->tx_full = 0;
1016				netif_wake_queue(dev);
1017			}
1018
1019			yp->dirty_tx = dirty_tx;
1020			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1021		}
1022#endif
1023
1024		/* Log errors and other uncommon events. */
1025		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1026			yellowfin_error(dev, intr_status);
1027
1028		if (--boguscnt < 0) {
1029			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1030				    intr_status);
1031			break;
1032		}
1033	} while (1);
1034
1035	if (yellowfin_debug > 3)
1036		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1037			      ioread16(ioaddr + IntrStatus));
1038
1039	spin_unlock (&yp->lock);
1040	return IRQ_RETVAL(handled);
1041}
1042
1043/* This routine is logically part of the interrupt handler, but separated
1044   for clarity and better register allocation. */
1045static int yellowfin_rx(struct net_device *dev)
1046{
1047	struct yellowfin_private *yp = netdev_priv(dev);
1048	int entry = yp->cur_rx % RX_RING_SIZE;
1049	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
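	/* boguscnt caps the loop at the number of receive buffers currently
	   posted to the chip, so one call never walks more than a full ring. */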
1050
1051	if (yellowfin_debug > 4) {
1052		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1053			   entry, yp->rx_ring[entry].result_status);
1054		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1055			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1056			   yp->rx_ring[entry].result_status);
1057	}
1058
1059	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1060	while (1) {
1061		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1062		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1063		s16 frame_status;
1064		u16 desc_status;
1065		int data_size, __maybe_unused yf_size;
1066		u8 *buf_addr;
1067
1068		if(!desc->result_status)
1069			break;
1070		dma_sync_single_for_cpu(&yp->pci_dev->dev,
1071					le32_to_cpu(desc->addr),
1072					yp->rx_buf_sz, DMA_FROM_DEVICE);
1073		desc_status = le32_to_cpu(desc->result_status) >> 16;
1074		buf_addr = rx_skb->data;
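		/* data_size is the posted buffer length (low 16 bits of
		   dbdma_cmd) minus what appears to be the residual count left
		   in result_status, i.e. the number of bytes actually
		   received. */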
1075		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1076			le32_to_cpu(desc->result_status)) & 0xffff;
1077		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1078		if (yellowfin_debug > 4)
1079			printk(KERN_DEBUG "  %s() status was %04x\n",
1080			       __func__, frame_status);
1081		if (--boguscnt < 0)
1082			break;
1083
1084		yf_size = sizeof(struct yellowfin_desc);
1085
1086		if ( ! (desc_status & RX_EOP)) {
1087			if (data_size != 0)
1088				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1089					    desc_status, data_size);
1090			dev->stats.rx_length_errors++;
1091		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1092			/* There was an error. */
1093			if (yellowfin_debug > 3)
1094				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1095				       __func__, frame_status);
1096			dev->stats.rx_errors++;
1097			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1098			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1099			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1100			if (frame_status < 0) dev->stats.rx_dropped++;
1101		} else if ( !(yp->drv_flags & IsGigabit)  &&
1102				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1103			u8 status1 = buf_addr[data_size-2];
1104			u8 status2 = buf_addr[data_size-1];
1105			dev->stats.rx_errors++;
1106			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1107			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1108			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1109			if (status2 & 0x80) dev->stats.rx_dropped++;
1110#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1111		} else if ((yp->flags & HasMACAddrBug)  &&
1112			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1113						      entry * yf_size),
1114					  dev->dev_addr) &&
1115			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1116						      entry * yf_size),
1117					  "\377\377\377\377\377\377")) {
1118			if (bogus_rx++ == 0)
1119				netdev_warn(dev, "Bad frame to %pM\n",
1120					    buf_addr);
1121#endif
1122		} else {
1123			struct sk_buff *skb;
1124			int pkt_len = data_size -
1125				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1126			/* To verify: Yellowfin Length should omit the CRC! */
1127
1128#ifndef final_version
1129			if (yellowfin_debug > 4)
1130				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1131				       __func__, pkt_len, data_size, boguscnt);
1132#endif
1133			/* Check if the packet is long enough to just pass up the skbuff
1134			   without copying to a properly sized skbuff. */
1135			if (pkt_len > rx_copybreak) {
1136				skb_put(skb = rx_skb, pkt_len);
1137				dma_unmap_single(&yp->pci_dev->dev,
1138						 le32_to_cpu(yp->rx_ring[entry].addr),
1139						 yp->rx_buf_sz,
1140						 DMA_FROM_DEVICE);
1141				yp->rx_skbuff[entry] = NULL;
1142			} else {
1143				skb = netdev_alloc_skb(dev, pkt_len + 2);
1144				if (skb == NULL)
1145					break;
1146				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1147				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1148				skb_put(skb, pkt_len);
1149				dma_sync_single_for_device(&yp->pci_dev->dev,
1150							   le32_to_cpu(desc->addr),
1151							   yp->rx_buf_sz,
1152							   DMA_FROM_DEVICE);
1153			}
1154			skb->protocol = eth_type_trans(skb, dev);
1155			netif_rx(skb);
1156			dev->stats.rx_packets++;
1157			dev->stats.rx_bytes += pkt_len;
1158		}
1159		entry = (++yp->cur_rx) % RX_RING_SIZE;
1160	}
1161
1162	/* Refill the Rx ring buffers. */
1163	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1164		entry = yp->dirty_rx % RX_RING_SIZE;
1165		if (yp->rx_skbuff[entry] == NULL) {
1166			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1167			if (skb == NULL)
1168				break;				/* Better luck next round. */
1169			yp->rx_skbuff[entry] = skb;
1170			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1171			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
1172									     skb->data,
1173									     yp->rx_buf_sz,
1174									     DMA_FROM_DEVICE));
1175		}
1176		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1177		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1178		if (entry != 0)
1179			yp->rx_ring[entry - 1].dbdma_cmd =
1180				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1181		else
1182			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1183				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1184							| yp->rx_buf_sz);
1185	}
1186
1187	return 0;
1188}
1189
1190static void yellowfin_error(struct net_device *dev, int intr_status)
1191{
1192	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1193	/* Hmmmmm, it's not clear what to do here. */
1194	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1195		dev->stats.tx_errors++;
1196	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1197		dev->stats.rx_errors++;
1198}
1199
1200static int yellowfin_close(struct net_device *dev)
1201{
1202	struct yellowfin_private *yp = netdev_priv(dev);
1203	void __iomem *ioaddr = yp->base;
1204	int i;
1205
1206	netif_stop_queue (dev);
1207
1208	if (yellowfin_debug > 1) {
1209		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1210			      ioread16(ioaddr + TxStatus),
1211			      ioread16(ioaddr + RxStatus),
1212			      ioread16(ioaddr + IntrStatus));
1213		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1214			      yp->cur_tx, yp->dirty_tx,
1215			      yp->cur_rx, yp->dirty_rx);
1216	}
1217
1218	/* Disable interrupts by clearing the interrupt mask. */
1219	iowrite16(0x0000, ioaddr + IntrEnb);
1220
1221	/* Stop the chip's Tx and Rx processes. */
1222	iowrite32(0x80000000, ioaddr + RxCtrl);
1223	iowrite32(0x80000000, ioaddr + TxCtrl);
1224
1225	del_timer(&yp->timer);
1226
1227#if defined(__i386__)
1228	if (yellowfin_debug > 2) {
1229		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1230				(unsigned long long)yp->tx_ring_dma);
1231		for (i = 0; i < TX_RING_SIZE*2; i++)
1232			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1233				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1234				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1235				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1236		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1237		for (i = 0; i < TX_RING_SIZE; i++)
1238			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1239				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1240				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1241
1242		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1243				(unsigned long long)yp->rx_ring_dma);
1244		for (i = 0; i < RX_RING_SIZE; i++) {
1245			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1246				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1247				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1248				   yp->rx_ring[i].result_status);
1249			if (yellowfin_debug > 6) {
1250				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1251					int j;
1252
1253					printk(KERN_DEBUG);
1254					for (j = 0; j < 0x50; j++)
1255						pr_cont(" %04x",
1256							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1257					pr_cont("\n");
1258				}
1259			}
1260		}
1261	}
1262#endif /* __i386__ debugging only */
1263
1264	free_irq(yp->pci_dev->irq, dev);
1265
1266	/* Free all the skbuffs in the Rx queue. */
1267	for (i = 0; i < RX_RING_SIZE; i++) {
1268		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1269		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1270		if (yp->rx_skbuff[i]) {
1271			dev_kfree_skb(yp->rx_skbuff[i]);
1272		}
1273		yp->rx_skbuff[i] = NULL;
1274	}
1275	for (i = 0; i < TX_RING_SIZE; i++) {
1276		dev_kfree_skb(yp->tx_skbuff[i]);
1277		yp->tx_skbuff[i] = NULL;
1278	}
1279
1280#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1281	if (yellowfin_debug > 0) {
1282		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1283			      bogus_rx);
1284	}
1285#endif
1286
1287	return 0;
1288}
1289
1290/* Set or clear the multicast filter for this adaptor. */
1291
1292static void set_rx_mode(struct net_device *dev)
1293{
1294	struct yellowfin_private *yp = netdev_priv(dev);
1295	void __iomem *ioaddr = yp->base;
1296	u16 cfg_value = ioread16(ioaddr + Cnfg);
1297
1298	/* Stop the Rx process to change any value. */
1299	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
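	/* AddrMode selects the Rx address filter used below: 0x000F accepts
	   everything (promiscuous), 0x000B accepts all multicast, 0x0003
	   enables the hash filter, and 0x0001 is normal unicast/broadcast. */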
1300	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1301		iowrite16(0x000F, ioaddr + AddrMode);
1302	} else if ((netdev_mc_count(dev) > 64) ||
1303		   (dev->flags & IFF_ALLMULTI)) {
1304		/* Too many to filter well, or accept all multicasts. */
1305		iowrite16(0x000B, ioaddr + AddrMode);
1306	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1307		struct netdev_hw_addr *ha;
1308		u16 hash_table[4];
1309		int i;
1310
1311		memset(hash_table, 0, sizeof(hash_table));
1312		netdev_for_each_mc_addr(ha, dev) {
1313			unsigned int bit;
1314
1315			/* Due to a bug in the early chip versions, multiple filter
1316			   slots must be set for each address. */
1317			if (yp->drv_flags & HasMulticastBug) {
1318				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1319				hash_table[bit >> 4] |= (1 << bit);
1320				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1321				hash_table[bit >> 4] |= (1 << bit);
1322				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1323				hash_table[bit >> 4] |= (1 << bit);
1324			}
1325			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1326			hash_table[bit >> 4] |= (1 << bit);
1327		}
1328		/* Copy the hash table to the chip. */
1329		for (i = 0; i < 4; i++)
1330			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1331		iowrite16(0x0003, ioaddr + AddrMode);
1332	} else {					/* Normal, unicast/broadcast-only mode. */
1333		iowrite16(0x0001, ioaddr + AddrMode);
1334	}
1335	/* Restart the Rx process. */
1336	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1337}
1338
1339static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1340{
1341	struct yellowfin_private *np = netdev_priv(dev);
1342
1343	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1344	strscpy(info->version, DRV_VERSION, sizeof(info->version));
1345	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1346}
1347
1348static const struct ethtool_ops ethtool_ops = {
1349	.get_drvinfo = yellowfin_get_drvinfo
1350};
1351
1352static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1353{
1354	struct yellowfin_private *np = netdev_priv(dev);
1355	void __iomem *ioaddr = np->base;
1356	struct mii_ioctl_data *data = if_mii(rq);
1357
1358	switch(cmd) {
1359	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1360		data->phy_id = np->phys[0] & 0x1f;
1361		fallthrough;
1362
1363	case SIOCGMIIREG:		/* Read MII PHY register. */
1364		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1365		return 0;
1366
1367	case SIOCSMIIREG:		/* Write MII PHY register. */
1368		if (data->phy_id == np->phys[0]) {
1369			u16 value = data->val_in;
1370			switch (data->reg_num) {
1371			case 0:
1372				/* Check for autonegotiation on or reset. */
1373				np->medialock = (value & 0x9000) ? 0 : 1;
1374				if (np->medialock)
1375					np->full_duplex = (value & 0x0100) ? 1 : 0;
1376				break;
1377			case 4: np->advertising = value; break;
1378			}
1379			/* Perhaps check_duplex(dev), depending on chip semantics. */
1380		}
1381		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1382		return 0;
1383	default:
1384		return -EOPNOTSUPP;
1385	}
1386}
1387
1388
1389static void yellowfin_remove_one(struct pci_dev *pdev)
1390{
1391	struct net_device *dev = pci_get_drvdata(pdev);
1392	struct yellowfin_private *np;
1393
1394	BUG_ON(!dev);
1395	np = netdev_priv(dev);
1396
1397	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
1398			  np->tx_status_dma);
1399	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
1400			  np->rx_ring_dma);
1401	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
1402			  np->tx_ring_dma);
1403	unregister_netdev (dev);
1404
1405	pci_iounmap(pdev, np->base);
1406
1407	pci_release_regions (pdev);
1408
1409	free_netdev (dev);
1410}
1411
1412
1413static struct pci_driver yellowfin_driver = {
1414	.name		= DRV_NAME,
1415	.id_table	= yellowfin_pci_tbl,
1416	.probe		= yellowfin_init_one,
1417	.remove		= yellowfin_remove_one,
1418};
1419
1420
1421static int __init yellowfin_init (void)
1422{
1423/* when a module, this is printed whether or not devices are found in probe */
1424#ifdef MODULE
1425	printk(version);
1426#endif
1427	return pci_register_driver(&yellowfin_driver);
1428}
1429
1430
1431static void __exit yellowfin_cleanup (void)
1432{
1433	pci_unregister_driver (&yellowfin_driver);
1434}
1435
1436
1437module_init(yellowfin_init);
1438module_exit(yellowfin_cleanup);
v4.17
   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
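/* Illustrative usage (not part of the original documentation): all of the
 * above are ordinary module parameters, so something like
 *	modprobe yellowfin debug=3 rx_copybreak=200 gx_fix=1
 * sets them at load time; options[] and full_duplex[] take one comma-separated
 * entry per board, applied in probe order. */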
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
 166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
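
In outline, the receive path this implies looks roughly like the following
sketch (using the driver's own names; see yellowfin_rx() below):

	if (pkt_len > rx_copybreak)
		pass the existing ring skbuff up the stack and refill the ring
		slot with a fresh full-sized skbuff later;
	else
		allocate a pkt_len-sized skbuff, copy the frame into it, pass the
		copy up, and keep the original ring skbuff in place.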
 172
 173IIIc. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
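
Roughly, the handshake is (a sketch in terms of the fields the driver uses):

	start_xmit:  fill tx_ring[cur_tx % TX_RING_SIZE], then cur_tx++;
	             if the ring is now full, set yp->tx_full and stop the queue.
	interrupt:   for each completed entry, free the skb and advance dirty_tx;
	             if yp->tx_full and enough slots have drained, clear tx_full
	             and wake the queue.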
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
 194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
 214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
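/* The last field of each yellowfin_pci_tbl[] entry is driver_data; the probe
 * routine uses it as an index into pci_id_tbl[] above to pick up the matching
 * name and drv_flags. */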
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
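/* Roughly: dbdma_cmd carries the command bits plus the request byte count in
 * its low 16 bits, addr is the little-endian DMA address of the data buffer,
 * branch_addr points at the next descriptor (which is how the rings below are
 * closed into a loop), and result_status is written back by the chip when the
 * descriptor completes. */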
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
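/* A descriptor command word is built by OR-ing one CMD_* value, any BRANCH_/
 * INTR_/WAIT_ modifiers, and the transfer length, e.g. the Rx ring below uses
 *	cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz)
 * for each receive buffer. */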
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(struct timer_list *t);
 347static void yellowfin_tx_timeout(struct net_device *dev);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_do_ioctl 		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 380        void *ring_space;
 381        dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387
 388/* when built into the kernel, we only print version if device is found */
 389#ifndef MODULE
 390	static int printed_version;
 391	if (!printed_version++)
 392		printk(version);
 393#endif
 394
 395	i = pci_enable_device(pdev);
 396	if (i) return i;
 397
 398	dev = alloc_etherdev(sizeof(*np));
 399	if (!dev)
 400		return -ENOMEM;
 401
 402	SET_NETDEV_DEV(dev, &pdev->dev);
 403
 404	np = netdev_priv(dev);
 405
 406	if (pci_request_regions(pdev, DRV_NAME))
 407		goto err_out_free_netdev;
 408
 409	pci_set_master (pdev);
 410
 411	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 412	if (!ioaddr)
 413		goto err_out_free_res;
 414
 415	irq = pdev->irq;
 416
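	/* Obtain the station address: boards flagged DontUseEeprom are assumed
	   to have it already programmed into the StnAddr registers (presumably
	   by firmware); otherwise it is read from the serial EEPROM, switching
	   to offset 0x100 when byte 6 of the default image reads back as 0xff. */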
 417	if (drv_flags & DontUseEeprom)
 418		for (i = 0; i < 6; i++)
 419			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 420	else {
 421		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 422		for (i = 0; i < 6; i++)
 423			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 424	}
 425
 426	/* Reset the chip. */
 427	iowrite32(0x80000000, ioaddr + DMACtrl);
 428
 429	pci_set_drvdata(pdev, dev);
 430	spin_lock_init(&np->lock);
 431
 432	np->pci_dev = pdev;
 433	np->chip_id = chip_idx;
 434	np->drv_flags = drv_flags;
 435	np->base = ioaddr;
 436
 437	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 438	if (!ring_space)
 439		goto err_out_cleardev;
 440	np->tx_ring = ring_space;
 441	np->tx_ring_dma = ring_dma;
 442
 443	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 444	if (!ring_space)
 445		goto err_out_unmap_tx;
 446	np->rx_ring = ring_space;
 447	np->rx_ring_dma = ring_dma;
 448
 449	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
 450	if (!ring_space)
 451		goto err_out_unmap_rx;
 452	np->tx_status = ring_space;
 453	np->tx_status_dma = ring_dma;
 454
 455	if (dev->mem_start)
 456		option = dev->mem_start;
 457
 458	/* The lower four bits are the media type. */
 459	if (option > 0) {
 460		if (option & 0x200)
 461			np->full_duplex = 1;
 462		np->default_port = option & 15;
 463		if (np->default_port)
 464			np->medialock = 1;
 465	}
 466	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 467		np->full_duplex = 1;
 468
 469	if (np->full_duplex)
 470		np->duplex_lock = 1;
 471
 472	/* The Yellowfin-specific entries in the device structure. */
 473	dev->netdev_ops = &netdev_ops;
 474	dev->ethtool_ops = &ethtool_ops;
 475	dev->watchdog_timeo = TX_TIMEOUT;
 476
 477	if (mtu)
 478		dev->mtu = mtu;
 479
 480	i = register_netdev(dev);
 481	if (i)
 482		goto err_out_unmap_status;
 483
 484	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 485		    pci_id_tbl[chip_idx].name,
 486		    ioread32(ioaddr + ChipRev), ioaddr,
 487		    dev->dev_addr, irq);
 488
 489	if (np->drv_flags & HasMII) {
 490		int phy, phy_idx = 0;
 491		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 492			int mii_status = mdio_read(ioaddr, phy, 1);
 493			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 494				np->phys[phy_idx++] = phy;
 495				np->advertising = mdio_read(ioaddr, phy, 4);
 496				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 497					    phy, mii_status, np->advertising);
 498			}
 499		}
 500		np->mii_cnt = phy_idx;
 501	}
 502
 503	find_cnt++;
 504
 505	return 0;
 506
 507err_out_unmap_status:
 508        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
 509		np->tx_status_dma);
 510err_out_unmap_rx:
 511        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 512err_out_unmap_tx:
 513        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 514err_out_cleardev:
 515	pci_iounmap(pdev, ioaddr);
 516err_out_free_res:
 517	pci_release_regions(pdev);
 518err_out_free_netdev:
 519	free_netdev (dev);
 520	return -ENODEV;
 521}
 522
 523static int read_eeprom(void __iomem *ioaddr, int location)
 524{
 525	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
 526
 527	iowrite8(location, ioaddr + EEAddr);
 528	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
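	/* Busy-wait for the EEPROM to finish (bit 0x80 of EEStatus clears),
	   bounded by bogus_cnt so an unresponsive part cannot hang the probe. */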
 529	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 530		;
 531	return ioread8(ioaddr + EERead);
 532}
 533
 534/* MII Management Data I/O accesses.
 535   These routines assume the MDIO controller is idle, and do not exit until
 536   the command is finished. */
 537
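/* Register usage, as the accessors below assume it: MII_Addr takes
   (phy_id << 8) | register, writing 1 to MII_Cmd starts a read, and bit 0 of
   MII_Status stays set while a transaction is still in progress. */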
 538static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 539{
 540	int i;
 541
 542	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 543	iowrite16(1, ioaddr + MII_Cmd);
 544	for (i = 10000; i >= 0; i--)
 545		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 546			break;
 547	return ioread16(ioaddr + MII_Rd_Data);
 548}
 549
 550static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 551{
 552	int i;
 553
 554	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 555	iowrite16(value, ioaddr + MII_Wr_Data);
 556
 557	/* Wait for the command to finish. */
 558	for (i = 10000; i >= 0; i--)
 559		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 560			break;
 561}
 562
 563
 564static int yellowfin_open(struct net_device *dev)
 565{
 566	struct yellowfin_private *yp = netdev_priv(dev);
 567	const int irq = yp->pci_dev->irq;
 568	void __iomem *ioaddr = yp->base;
 569	int i, rc;
 570
 571	/* Reset the chip. */
 572	iowrite32(0x80000000, ioaddr + DMACtrl);
 573
 574	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 575	if (rc)
 576		return rc;
 577
 578	rc = yellowfin_init_ring(dev);
 579	if (rc < 0)
 580		goto err_free_irq;
 581
 582	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 583	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 584
 585	for (i = 0; i < 6; i++)
 586		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 587
 588	/* Set up various condition 'select' registers.
 589	   There are no options here. */
 590	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 591	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 592	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 593	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 594	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 595	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 596
 597	/* Initialize other registers: with so many, this will eventually be
 598	   converted to an offset/value list. */
 599	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 600	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 601	/* Enable automatic generation of flow control frames, period 0xffff. */
 602	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 603
 604	yp->tx_threshold = 32;
 605	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 606
 607	if (dev->if_port == 0)
 608		dev->if_port = yp->default_port;
 609
 610	netif_start_queue(dev);
 611
 612	/* Setting the Rx mode will start the Rx process. */
 613	if (yp->drv_flags & IsGigabit) {
 614		/* We are always in full-duplex mode with gigabit! */
 615		yp->full_duplex = 1;
 616		iowrite16(0x01CF, ioaddr + Cnfg);
 617	} else {
 618		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 619		iowrite16(0x1018, ioaddr + FrameGap1);
 620		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 621	}
 622	set_rx_mode(dev);
 623
 624	/* Enable interrupts by setting the interrupt mask. */
 625	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 626	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 627	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 628	iowrite32(0x80008000, ioaddr + TxCtrl);
 629
 630	if (yellowfin_debug > 2) {
 631		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 632	}
 633
 634	/* Set the timer to check for link beat. */
 635	timer_setup(&yp->timer, yellowfin_timer, 0);
 636	yp->timer.expires = jiffies + 3*HZ;
 637	add_timer(&yp->timer);
 638out:
 639	return rc;
 640
 641err_free_irq:
 642	free_irq(irq, dev);
 643	goto out;
 644}
 645
 646static void yellowfin_timer(struct timer_list *t)
 647{
 648	struct yellowfin_private *yp = from_timer(yp, t, timer);
 649	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
 650	void __iomem *ioaddr = yp->base;
 651	int next_tick = 60*HZ;
 652
 653	if (yellowfin_debug > 3) {
 654		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 655			      ioread16(ioaddr + IntrStatus));
 656	}
 657
 658	if (yp->mii_cnt) {
 659		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 660		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 661		int negotiated = lpa & yp->advertising;
 662		if (yellowfin_debug > 1)
 663			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 664				      yp->phys[0], bmsr, lpa);
 665
 666		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 667
 668		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 669
 670		if (bmsr & BMSR_LSTATUS)
 671			next_tick = 60*HZ;
 672		else
 673			next_tick = 3*HZ;
 674	}
 675
 676	yp->timer.expires = jiffies + next_tick;
 677	add_timer(&yp->timer);
 678}
 679
 680static void yellowfin_tx_timeout(struct net_device *dev)
 681{
 682	struct yellowfin_private *yp = netdev_priv(dev);
 683	void __iomem *ioaddr = yp->base;
 684
 685	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 686		    yp->cur_tx, yp->dirty_tx,
 687		    ioread32(ioaddr + TxStatus),
 688		    ioread32(ioaddr + RxStatus));
 689
 690	/* Note: these should be KERN_DEBUG. */
 691	if (yellowfin_debug) {
 692		int i;
 693		pr_warn("  Rx ring %p: ", yp->rx_ring);
 694		for (i = 0; i < RX_RING_SIZE; i++)
 695			pr_cont(" %08x", yp->rx_ring[i].result_status);
 696		pr_cont("\n");
 697		pr_warn("  Tx ring %p: ", yp->tx_ring);
 698		for (i = 0; i < TX_RING_SIZE; i++)
 699			pr_cont(" %04x /%08x",
 700			       yp->tx_status[i].tx_errs,
 701			       yp->tx_ring[i].result_status);
 702		pr_cont("\n");
 703	}
 704
 705	/* If the hardware is found to hang regularly, we will update the code
 706	   to reinitialize the chip here. */
 707	dev->if_port = 0;
 708
 709	/* Wake the potentially-idle transmit channel. */
 710	iowrite32(0x10001000, yp->base + TxCtrl);
 711	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 712		netif_wake_queue (dev);		/* Typical path */
 713
 714	netif_trans_update(dev); /* prevent tx timeout */
 715	dev->stats.tx_errors++;
 716}
 717
 718/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 719static int yellowfin_init_ring(struct net_device *dev)
 720{
 721	struct yellowfin_private *yp = netdev_priv(dev);
 722	int i, j;
 723
 724	yp->tx_full = 0;
 725	yp->cur_rx = yp->cur_tx = 0;
 726	yp->dirty_tx = 0;
 727
 728	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 729
 730	for (i = 0; i < RX_RING_SIZE; i++) {
 731		yp->rx_ring[i].dbdma_cmd =
 732			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 733		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 734			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 735	}
 736
 737	for (i = 0; i < RX_RING_SIZE; i++) {
 738		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 739		yp->rx_skbuff[i] = skb;
 740		if (skb == NULL)
 741			break;
 742		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 743		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 744			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 745	}
 746	if (i != RX_RING_SIZE) {
 747		for (j = 0; j < i; j++)
 748			dev_kfree_skb(yp->rx_skbuff[j]);
 749		return -ENOMEM;
 750	}
 751	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 752	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 753
 754#define NO_TXSTATS
 755#ifdef NO_TXSTATS
 756	/* In this mode the Tx ring needs only a single descriptor. */
 757	for (i = 0; i < TX_RING_SIZE; i++) {
 758		yp->tx_skbuff[i] = NULL;
 759		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 760		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 761			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 762	}
 763	/* Wrap ring */
 764	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 765#else
 766{
 767	/* Tx ring needs a pair of descriptors, the second for the status. */
 768	for (i = 0; i < TX_RING_SIZE; i++) {
 769		j = 2*i;
 770		yp->tx_skbuff[i] = 0;
 771		/* Branch on Tx error. */
 772		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 773		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 774			(j+1)*sizeof(struct yellowfin_desc));
 775		j++;
 776		if (yp->drv_flags & FullTxStatus) {
 777			yp->tx_ring[j].dbdma_cmd =
 778				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 779			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 780			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 781				i*sizeof(struct tx_status_words));
 782		} else {
 783			/* Symbios chips write only tx_errs word. */
 784			yp->tx_ring[j].dbdma_cmd =
 785				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 786			yp->tx_ring[j].request_cnt = 2;
 787			/* Om pade ummmmm... */
 788			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 789				i*sizeof(struct tx_status_words) +
 790				offsetof(struct tx_status_words,
 791					 tx_errs));
 792		}
 793		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 794			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 795	}
 796	/* Wrap ring */
 797	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 798}
 799#endif
 800	yp->tx_tail_desc = &yp->tx_status[0];
 801	return 0;
 802}
 803
 804static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 805					struct net_device *dev)
 806{
 807	struct yellowfin_private *yp = netdev_priv(dev);
 808	unsigned entry;
 809	int len = skb->len;
 810
 811	netif_stop_queue (dev);
 812
 813	/* Note: Ordering is important here, set the field with the
 814	   "ownership" bit last, and only then increment cur_tx. */
 815
 816	/* Calculate the next Tx descriptor entry. */
 817	entry = yp->cur_tx % TX_RING_SIZE;
 818
 819	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 820		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 821		/* Fix GX chipset errata. */
 822		if (cacheline_end > 24  || cacheline_end == 0) {
 823			len = skb->len + 32 - cacheline_end + 1;
 824			if (skb_padto(skb, len)) {
 825				yp->tx_skbuff[entry] = NULL;
 826				netif_wake_queue(dev);
 827				return NETDEV_TX_OK;
 828			}
 829		}
 830	}
 831	yp->tx_skbuff[entry] = skb;
 832
 833#ifdef NO_TXSTATS
 834	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 835		skb->data, len, PCI_DMA_TODEVICE));
 836	yp->tx_ring[entry].result_status = 0;
 837	if (entry >= TX_RING_SIZE-1) {
 838		/* New stop command. */
 839		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 840		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 841			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 842	} else {
 843		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 844		yp->tx_ring[entry].dbdma_cmd =
 845			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 846	}
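	/* Ordering note: the slot after this one is parked at CMD_STOP before
	   the new descriptor is armed, so the DMA engine always runs into a
	   valid stop command rather than stale ring contents. */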
 847	yp->cur_tx++;
 848#else
 849	yp->tx_ring[entry<<1].request_cnt = len;
 850	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 851		skb->data, len, PCI_DMA_TODEVICE));
 852	/* The input_last (status-write) command is constant, but we must
 853	   rewrite the subsequent 'stop' command. */
 854
 855	yp->cur_tx++;
 856	{
 857		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 858		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 859	}
 860	/* Final step -- overwrite the old 'stop' command. */
 861
 862	yp->tx_ring[entry<<1].dbdma_cmd =
 863		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 864					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 865#endif
 866
 867	/* Non-x86 Todo: explicitly flush cache lines here. */
 868
 869	/* Wake the potentially-idle transmit channel. */
 870	iowrite32(0x10001000, yp->base + TxCtrl);
 871
 872	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 873		netif_start_queue (dev);		/* Typical path */
 874	else
 875		yp->tx_full = 1;
 876
 877	if (yellowfin_debug > 4) {
 878		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 879			      yp->cur_tx, entry);
 880	}
 881	return NETDEV_TX_OK;
 882}
 883
 884/* The interrupt handler does all of the Rx thread work and cleans up
 885   after the Tx thread. */
 886static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 887{
 888	struct net_device *dev = dev_instance;
 889	struct yellowfin_private *yp;
 890	void __iomem *ioaddr;
 891	int boguscnt = max_interrupt_work;
 892	unsigned int handled = 0;
 893
 894	yp = netdev_priv(dev);
 895	ioaddr = yp->base;
 896
 897	spin_lock (&yp->lock);
 898
 899	do {
 900		u16 intr_status = ioread16(ioaddr + IntrClear);
 901
 902		if (yellowfin_debug > 4)
 903			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 904				      intr_status);
 905
 906		if (intr_status == 0)
 907			break;
 908		handled = 1;
 909
 910		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 911			yellowfin_rx(dev);
 912			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 913		}
 914
 915#ifdef NO_TXSTATS
 916		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 917			int entry = yp->dirty_tx % TX_RING_SIZE;
 918			struct sk_buff *skb;
 919
 920			if (yp->tx_ring[entry].result_status == 0)
 921				break;
 922			skb = yp->tx_skbuff[entry];
 923			dev->stats.tx_packets++;
 924			dev->stats.tx_bytes += skb->len;
 925			/* Free the original skb. */
 926			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
 927				skb->len, PCI_DMA_TODEVICE);
 928			dev_kfree_skb_irq(skb);
 929			yp->tx_skbuff[entry] = NULL;
 930		}
 931		if (yp->tx_full &&
 932		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 933			/* The ring is no longer full, clear tbusy. */
 934			yp->tx_full = 0;
 935			netif_wake_queue(dev);
 936		}
 937#else
 938		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 939			unsigned dirty_tx = yp->dirty_tx;
 940
 941			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 942				 dirty_tx++) {
 943				/* Todo: optimize this. */
 944				int entry = dirty_tx % TX_RING_SIZE;
 945				u16 tx_errs = yp->tx_status[entry].tx_errs;
 946				struct sk_buff *skb;
 947
 948#ifndef final_version
 949				if (yellowfin_debug > 5)
 950					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 951						      entry,
 952						      yp->tx_status[entry].tx_cnt,
 953						      yp->tx_status[entry].tx_errs,
 954						      yp->tx_status[entry].total_tx_cnt,
 955						      yp->tx_status[entry].paused);
 956#endif
 957				if (tx_errs == 0)
 958					break;	/* It still hasn't been Txed */
 959				skb = yp->tx_skbuff[entry];
 960				if (tx_errs & 0xF810) {
 961					/* There was a major error; log it. */
 962#ifndef final_version
 963					if (yellowfin_debug > 1)
 964						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 965							      tx_errs);
 966#endif
 967					dev->stats.tx_errors++;
 968					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 969					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 970					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 971					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 972				} else {
 973#ifndef final_version
 974					if (yellowfin_debug > 4)
 975						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 976							      tx_errs);
 977#endif
 978					dev->stats.tx_bytes += skb->len;
 979					dev->stats.collisions += tx_errs & 15;
 980					dev->stats.tx_packets++;
 981				}
 982				/* Free the original skb. */
 983				pci_unmap_single(yp->pci_dev,
 984					le32_to_cpu(yp->tx_ring[entry<<1].addr), skb->len,
 985					PCI_DMA_TODEVICE);
 986				dev_kfree_skb_irq(skb);
 987				yp->tx_skbuff[entry] = 0;
 988				/* Mark status as empty. */
 989				yp->tx_status[entry].tx_errs = 0;
 990			}
 991
 992#ifndef final_version
 993			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
 994				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
 995					   dirty_tx, yp->cur_tx, yp->tx_full);
 996				dirty_tx += TX_RING_SIZE;
 997			}
 998#endif
 999
1000			if (yp->tx_full &&
1001			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1002				/* The ring is no longer full, clear tbusy. */
1003				yp->tx_full = 0;
1004				netif_wake_queue(dev);
1005			}
1006
1007			yp->dirty_tx = dirty_tx;
1008			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1009		}
1010#endif
1011
1012		/* Log errors and other uncommon events. */
1013		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1014			yellowfin_error(dev, intr_status);
1015
1016		if (--boguscnt < 0) {
1017			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1018				    intr_status);
1019			break;
1020		}
1021	} while (1);
1022
1023	if (yellowfin_debug > 3)
1024		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1025			      ioread16(ioaddr + IntrStatus));
1026
1027	spin_unlock (&yp->lock);
1028	return IRQ_RETVAL(handled);
1029}
1030
1031/* This routine is logically part of the interrupt handler, but separated
1032   for clarity and better register allocation. */
1033static int yellowfin_rx(struct net_device *dev)
1034{
1035	struct yellowfin_private *yp = netdev_priv(dev);
1036	int entry = yp->cur_rx % RX_RING_SIZE;
1037	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1038
1039	if (yellowfin_debug > 4) {
1040		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1041			   entry, yp->rx_ring[entry].result_status);
1042		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1043			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1044			   yp->rx_ring[entry].result_status);
1045	}
1046
1047	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1048	while (1) {
1049		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1050		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1051		s16 frame_status;
1052		u16 desc_status;
1053		int data_size, yf_size;
1054		u8 *buf_addr;
1055
1056		if(!desc->result_status)
1057			break;
1058		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1059			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1060		desc_status = le32_to_cpu(desc->result_status) >> 16;
1061		buf_addr = rx_skb->data;
1062		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1063			le32_to_cpu(desc->result_status)) & 0xffff;
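		/* data_size is the number of bytes the chip actually wrote (the
		   requested count in dbdma_cmd minus what appears to be the
		   residual left in result_status); the chip places its frame
		   status word(s) at the end of that data, extracted next. */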
1064		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1065		if (yellowfin_debug > 4)
1066			printk(KERN_DEBUG "  %s() status was %04x\n",
1067			       __func__, frame_status);
1068		if (--boguscnt < 0)
1069			break;
1070
1071		yf_size = sizeof(struct yellowfin_desc);
1072
1073		if ( ! (desc_status & RX_EOP)) {
1074			if (data_size != 0)
1075				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1076					    desc_status, data_size);
1077			dev->stats.rx_length_errors++;
1078		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1079			/* There was an error. */
1080			if (yellowfin_debug > 3)
1081				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1082				       __func__, frame_status);
1083			dev->stats.rx_errors++;
1084			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1085			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1086			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1087			if (frame_status < 0) dev->stats.rx_dropped++;
1088		} else if ( !(yp->drv_flags & IsGigabit)  &&
1089				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1090			u8 status1 = buf_addr[data_size-2];
1091			u8 status2 = buf_addr[data_size-1];
1092			dev->stats.rx_errors++;
1093			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1094			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1095			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1096			if (status2 & 0x80) dev->stats.rx_dropped++;
1097#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1098		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1099			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1100						      entry * yf_size),
1101					  dev->dev_addr) &&
1102			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1103						      entry * yf_size),
1104					  "\377\377\377\377\377\377")) {
1105			if (bogus_rx++ == 0)
1106				netdev_warn(dev, "Bad frame to %pM\n",
1107					    buf_addr);
1108#endif
1109		} else {
1110			struct sk_buff *skb;
1111			int pkt_len = data_size -
1112				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1113			/* To verify: Yellowfin Length should omit the CRC! */
1114
1115#ifndef final_version
1116			if (yellowfin_debug > 4)
1117				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1118				       __func__, pkt_len, data_size, boguscnt);
1119#endif
1120			/* Check if the packet is long enough to just pass up the skbuff
1121			   without copying to a properly sized skbuff. */
1122			if (pkt_len > rx_copybreak) {
1123				skb_put(skb = rx_skb, pkt_len);
1124				pci_unmap_single(yp->pci_dev,
1125					le32_to_cpu(yp->rx_ring[entry].addr),
1126					yp->rx_buf_sz,
1127					PCI_DMA_FROMDEVICE);
1128				yp->rx_skbuff[entry] = NULL;
1129			} else {
1130				skb = netdev_alloc_skb(dev, pkt_len + 2);
1131				if (skb == NULL)
1132					break;
1133				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1134				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1135				skb_put(skb, pkt_len);
1136				pci_dma_sync_single_for_device(yp->pci_dev,
1137								le32_to_cpu(desc->addr),
1138								yp->rx_buf_sz,
1139								PCI_DMA_FROMDEVICE);
1140			}
1141			skb->protocol = eth_type_trans(skb, dev);
1142			netif_rx(skb);
1143			dev->stats.rx_packets++;
1144			dev->stats.rx_bytes += pkt_len;
1145		}
1146		entry = (++yp->cur_rx) % RX_RING_SIZE;
1147	}
1148
1149	/* Refill the Rx ring buffers. */
1150	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1151		entry = yp->dirty_rx % RX_RING_SIZE;
1152		if (yp->rx_skbuff[entry] == NULL) {
1153			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1154			if (skb == NULL)
1155				break;				/* Better luck next round. */
1156			yp->rx_skbuff[entry] = skb;
1157			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1158			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1159				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1160		}
1161		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1162		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1163		if (entry != 0)
1164			yp->rx_ring[entry - 1].dbdma_cmd =
1165				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1166		else
1167			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1168				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1169							| yp->rx_buf_sz);
1170	}
1171
1172	return 0;
1173}
1174
1175static void yellowfin_error(struct net_device *dev, int intr_status)
1176{
1177	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1178	/* Hmmmmm, it's not clear what to do here. */
1179	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1180		dev->stats.tx_errors++;
1181	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1182		dev->stats.rx_errors++;
1183}
1184
1185static int yellowfin_close(struct net_device *dev)
1186{
1187	struct yellowfin_private *yp = netdev_priv(dev);
1188	void __iomem *ioaddr = yp->base;
1189	int i;
1190
1191	netif_stop_queue (dev);
1192
1193	if (yellowfin_debug > 1) {
1194		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1195			      ioread16(ioaddr + TxStatus),
1196			      ioread16(ioaddr + RxStatus),
1197			      ioread16(ioaddr + IntrStatus));
1198		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1199			      yp->cur_tx, yp->dirty_tx,
1200			      yp->cur_rx, yp->dirty_rx);
1201	}
1202
1203	/* Disable interrupts by clearing the interrupt mask. */
1204	iowrite16(0x0000, ioaddr + IntrEnb);
1205
1206	/* Stop the chip's Tx and Rx processes. */
1207	iowrite32(0x80000000, ioaddr + RxCtrl);
1208	iowrite32(0x80000000, ioaddr + TxCtrl);
1209
1210	del_timer(&yp->timer);
1211
1212#if defined(__i386__)
1213	if (yellowfin_debug > 2) {
1214		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1215				(unsigned long long)yp->tx_ring_dma);
1216		for (i = 0; i < TX_RING_SIZE*2; i++)
1217			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1218				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1219				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1220				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1221		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1222		for (i = 0; i < TX_RING_SIZE; i++)
1223			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1224				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1225				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1226
1227		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1228				(unsigned long long)yp->rx_ring_dma);
1229		for (i = 0; i < RX_RING_SIZE; i++) {
1230			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1231				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1232				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1233				   yp->rx_ring[i].result_status);
1234			if (yellowfin_debug > 6) {
1235				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1236					int j;
1237
1238					printk(KERN_DEBUG);
1239					for (j = 0; j < 0x50; j++)
1240						pr_cont(" %04x",
1241							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1242					pr_cont("\n");
1243				}
1244			}
1245		}
1246	}
1247#endif /* __i386__ debugging only */
1248
1249	free_irq(yp->pci_dev->irq, dev);
1250
1251	/* Free all the skbuffs in the Rx queue. */
1252	for (i = 0; i < RX_RING_SIZE; i++) {
1253		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1254		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1255		if (yp->rx_skbuff[i]) {
1256			dev_kfree_skb(yp->rx_skbuff[i]);
1257		}
1258		yp->rx_skbuff[i] = NULL;
1259	}
1260	for (i = 0; i < TX_RING_SIZE; i++) {
1261		if (yp->tx_skbuff[i])
1262			dev_kfree_skb(yp->tx_skbuff[i]);
1263		yp->tx_skbuff[i] = NULL;
1264	}
1265
1266#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1267	if (yellowfin_debug > 0) {
1268		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1269			      bogus_rx);
1270	}
1271#endif
1272
1273	return 0;
1274}
1275
1276/* Set or clear the multicast filter for this adaptor. */
1277
1278static void set_rx_mode(struct net_device *dev)
1279{
1280	struct yellowfin_private *yp = netdev_priv(dev);
1281	void __iomem *ioaddr = yp->base;
1282	u16 cfg_value = ioread16(ioaddr + Cnfg);
1283
1284	/* Stop the Rx process to change any value. */
1285	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1286	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1287		iowrite16(0x000F, ioaddr + AddrMode);
1288	} else if ((netdev_mc_count(dev) > 64) ||
1289		   (dev->flags & IFF_ALLMULTI)) {
1290		/* Too many to filter well, or accept all multicasts. */
1291		iowrite16(0x000B, ioaddr + AddrMode);
1292	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1293		struct netdev_hw_addr *ha;
1294		u16 hash_table[4];
1295		int i;
1296
1297		memset(hash_table, 0, sizeof(hash_table));
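		/* Each address hashes to one of 64 filter bits: a little-endian
		   CRC over the leading bytes of the address yields a bit number
		   0-63, bit >> 4 selects one of the four 16-bit HashTbl words
		   and the low four bits select the bit within that word. */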
1298		netdev_for_each_mc_addr(ha, dev) {
1299			unsigned int bit;
1300
1301			/* Due to a bug in the early chip versions, multiple filter
1302			   slots must be set for each address. */
1303			if (yp->drv_flags & HasMulticastBug) {
1304				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1305				hash_table[bit >> 4] |= (1 << (bit & 15));
1306				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1307				hash_table[bit >> 4] |= (1 << (bit & 15));
1308				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1309				hash_table[bit >> 4] |= (1 << (bit & 15));
1310			}
1311			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1312			hash_table[bit >> 4] |= (1 << (bit & 15));
1313		}
1314		/* Copy the hash table to the chip. */
1315		for (i = 0; i < 4; i++)
1316			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1317		iowrite16(0x0003, ioaddr + AddrMode);
1318	} else {					/* Normal, unicast/broadcast-only mode. */
1319		iowrite16(0x0001, ioaddr + AddrMode);
1320	}
1321	/* Restart the Rx process. */
1322	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1323}
1324
1325static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1326{
1327	struct yellowfin_private *np = netdev_priv(dev);
1328
1329	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1330	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1331	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1332}
1333
1334static const struct ethtool_ops ethtool_ops = {
1335	.get_drvinfo = yellowfin_get_drvinfo
1336};
1337
1338static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1339{
1340	struct yellowfin_private *np = netdev_priv(dev);
1341	void __iomem *ioaddr = np->base;
1342	struct mii_ioctl_data *data = if_mii(rq);
1343
1344	switch(cmd) {
1345	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1346		data->phy_id = np->phys[0] & 0x1f;
1347		/* Fall Through */
1348
1349	case SIOCGMIIREG:		/* Read MII PHY register. */
1350		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1351		return 0;
1352
1353	case SIOCSMIIREG:		/* Write MII PHY register. */
1354		if (data->phy_id == np->phys[0]) {
1355			u16 value = data->val_in;
1356			switch (data->reg_num) {
1357			case 0:
1358				/* Check for autonegotiation on or reset. */
1359				np->medialock = (value & 0x9000) ? 0 : 1;
1360				if (np->medialock)
1361					np->full_duplex = (value & 0x0100) ? 1 : 0;
1362				break;
1363			case 4: np->advertising = value; break;
1364			}
1365			/* Perhaps check_duplex(dev), depending on chip semantics. */
1366		}
1367		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1368		return 0;
1369	default:
1370		return -EOPNOTSUPP;
1371	}
1372}
1373
1374
1375static void yellowfin_remove_one(struct pci_dev *pdev)
1376{
1377	struct net_device *dev = pci_get_drvdata(pdev);
1378	struct yellowfin_private *np;
1379
1380	BUG_ON(!dev);
1381	np = netdev_priv(dev);
1382
1383        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1384		np->tx_status_dma);
1385	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1386	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1387	unregister_netdev (dev);
1388
1389	pci_iounmap(pdev, np->base);
1390
1391	pci_release_regions (pdev);
1392
1393	free_netdev (dev);
1394}
1395
1396
1397static struct pci_driver yellowfin_driver = {
1398	.name		= DRV_NAME,
1399	.id_table	= yellowfin_pci_tbl,
1400	.probe		= yellowfin_init_one,
1401	.remove		= yellowfin_remove_one,
1402};
1403
1404
1405static int __init yellowfin_init (void)
1406{
1407/* when a module, this is printed whether or not devices are found in probe */
1408#ifdef MODULE
1409	printk(version);
1410#endif
1411	return pci_register_driver(&yellowfin_driver);
1412}
1413
1414
1415static void __exit yellowfin_cleanup (void)
1416{
1417	pci_unregister_driver (&yellowfin_driver);
1418}
1419
1420
1421module_init(yellowfin_init);
1422module_exit(yellowfin_cleanup);