   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
 166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
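
In outline, yellowfin_rx() below makes this decision roughly as follows
(error handling and ring refill omitted): either the full-sized ring skbuff
is handed straight up the stack, or the frame is copied into a right-sized
skbuff and the ring buffer is reused.

	if (pkt_len > rx_copybreak) {
		skb_put(skb = rx_skb, pkt_len);
		yp->rx_skbuff[entry] = NULL;
	} else {
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		skb_reserve(skb, 2);
		skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
		skb_put(skb, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);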
 172
 173IIIC. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
 182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
 194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
 214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
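
/* Each entry's driver_data above is used by yellowfin_init_one() as an index
   into pci_id_tbl[], selecting the board name and capability flags for the
   matched device. */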
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
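
/* As used throughout this driver, dbdma_cmd packs three fields into one
   32-bit word: the low 16 bits carry the transfer byte count, bits 16-23
   select the branch/interrupt/wait conditions above, and the top bits hold
   the CMD_* opcode. */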
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(struct timer_list *t);
 347static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_eth_ioctl		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 380        void *ring_space;
 381        dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387	u8 addr[ETH_ALEN];
 388
 389/* when built into the kernel, we only print version if device is found */
 390#ifndef MODULE
 391	static int printed_version;
 392	if (!printed_version++)
 393		printk(version);
 394#endif
 395
 396	i = pci_enable_device(pdev);
 397	if (i) return i;
 398
 399	dev = alloc_etherdev(sizeof(*np));
 400	if (!dev)
 401		return -ENOMEM;
 402
 403	SET_NETDEV_DEV(dev, &pdev->dev);
 404
 405	np = netdev_priv(dev);
 406
 407	if (pci_request_regions(pdev, DRV_NAME))
 408		goto err_out_free_netdev;
 409
 410	pci_set_master (pdev);
 411
 412	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 413	if (!ioaddr)
 414		goto err_out_free_res;
 415
 416	irq = pdev->irq;
 417
 418	if (drv_flags & DontUseEeprom)
 419		for (i = 0; i < 6; i++)
 420			addr[i] = ioread8(ioaddr + StnAddr + i);
 421	else {
 422		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 423		for (i = 0; i < 6; i++)
 424			addr[i] = read_eeprom(ioaddr, ee_offset + i);
 425	}
 426	eth_hw_addr_set(dev, addr);
 427
 428	/* Reset the chip. */
 429	iowrite32(0x80000000, ioaddr + DMACtrl);
 430
 431	pci_set_drvdata(pdev, dev);
 432	spin_lock_init(&np->lock);
 433
 434	np->pci_dev = pdev;
 435	np->chip_id = chip_idx;
 436	np->drv_flags = drv_flags;
 437	np->base = ioaddr;
 438
 439	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
 440					GFP_KERNEL);
 441	if (!ring_space)
 442		goto err_out_cleardev;
 443	np->tx_ring = ring_space;
 444	np->tx_ring_dma = ring_dma;
 445
 446	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
 447					GFP_KERNEL);
 448	if (!ring_space)
 449		goto err_out_unmap_tx;
 450	np->rx_ring = ring_space;
 451	np->rx_ring_dma = ring_dma;
 452
 453	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
 454					&ring_dma, GFP_KERNEL);
 455	if (!ring_space)
 456		goto err_out_unmap_rx;
 457	np->tx_status = ring_space;
 458	np->tx_status_dma = ring_dma;
 459
 460	if (dev->mem_start)
 461		option = dev->mem_start;
 462
 463	/* The lower four bits are the media type. */
 464	if (option > 0) {
 465		if (option & 0x200)
 466			np->full_duplex = 1;
 467		np->default_port = option & 15;
 468		if (np->default_port)
 469			np->medialock = 1;
 470	}
 471	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 472		np->full_duplex = 1;
 473
 474	if (np->full_duplex)
 475		np->duplex_lock = 1;
 476
 477	/* The Yellowfin-specific entries in the device structure. */
 478	dev->netdev_ops = &netdev_ops;
 479	dev->ethtool_ops = &ethtool_ops;
 480	dev->watchdog_timeo = TX_TIMEOUT;
 481
 482	if (mtu)
 483		dev->mtu = mtu;
 484
 485	i = register_netdev(dev);
 486	if (i)
 487		goto err_out_unmap_status;
 488
 489	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 490		    pci_id_tbl[chip_idx].name,
 491		    ioread32(ioaddr + ChipRev), ioaddr,
 492		    dev->dev_addr, irq);
 493
 494	if (np->drv_flags & HasMII) {
 495		int phy, phy_idx = 0;
 496		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 497			int mii_status = mdio_read(ioaddr, phy, 1);
 498			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 499				np->phys[phy_idx++] = phy;
 500				np->advertising = mdio_read(ioaddr, phy, 4);
 501				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 502					    phy, mii_status, np->advertising);
 503			}
 504		}
 505		np->mii_cnt = phy_idx;
 506	}
 507
 508	find_cnt++;
 509
 510	return 0;
 511
 512err_out_unmap_status:
 513	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
 514			  np->tx_status_dma);
 515err_out_unmap_rx:
 516	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
 517			  np->rx_ring_dma);
 518err_out_unmap_tx:
 519	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
 520			  np->tx_ring_dma);
 521err_out_cleardev:
 522	pci_iounmap(pdev, ioaddr);
 523err_out_free_res:
 524	pci_release_regions(pdev);
 525err_out_free_netdev:
 526	free_netdev (dev);
 527	return -ENODEV;
 528}
 529
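/* Read one byte from the serial EEPROM: latch the low address byte in EEAddr,
   issue a read command (0x30 plus the high address bits) via EECtrl, poll the
   busy bit in EEStatus, then return the data byte from EERead. */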
 530static int read_eeprom(void __iomem *ioaddr, int location)
 531{
 532	int bogus_cnt = 10000;		/* Typical 33Mhz: 1050 ticks */
 533
 534	iowrite8(location, ioaddr + EEAddr);
 535	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 536	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 537		;
 538	return ioread8(ioaddr + EERead);
 539}
 540
 541/* MII Management Data I/O accesses.
 542   These routines assume the MDIO controller is idle, and do not exit until
 543   the command is finished. */
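
/* Both routines share the same handshake: (phy_id << 8) | regnum is written
   to MII_Addr, the operation is started (a read by writing 1 to MII_Cmd, a
   write by storing the value in MII_Wr_Data), and bit 0 of MII_Status is
   polled until the controller goes idle again. */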
 544
 545static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 546{
 547	int i;
 548
 549	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 550	iowrite16(1, ioaddr + MII_Cmd);
 551	for (i = 10000; i >= 0; i--)
 552		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 553			break;
 554	return ioread16(ioaddr + MII_Rd_Data);
 555}
 556
 557static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 558{
 559	int i;
 560
 561	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 562	iowrite16(value, ioaddr + MII_Wr_Data);
 563
 564	/* Wait for the command to finish. */
 565	for (i = 10000; i >= 0; i--)
 566		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 567			break;
 568}
 569
 570
 571static int yellowfin_open(struct net_device *dev)
 572{
 573	struct yellowfin_private *yp = netdev_priv(dev);
 574	const int irq = yp->pci_dev->irq;
 575	void __iomem *ioaddr = yp->base;
 576	int i, rc;
 577
 578	/* Reset the chip. */
 579	iowrite32(0x80000000, ioaddr + DMACtrl);
 580
 581	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 582	if (rc)
 583		return rc;
 584
 585	rc = yellowfin_init_ring(dev);
 586	if (rc < 0)
 587		goto err_free_irq;
 588
 589	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 590	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 591
 592	for (i = 0; i < 6; i++)
 593		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 594
 595	/* Set up various condition 'select' registers.
 596	   There are no options here. */
 597	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 598	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 599	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 600	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 601	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 602	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 603
 604	/* Initialize other registers: with so many, this will eventually be
 605	   converted to an offset/value list. */
 606	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 607	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 608	/* Enable automatic generation of flow control frames, period 0xffff. */
 609	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 610
 611	yp->tx_threshold = 32;
 612	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 613
 614	if (dev->if_port == 0)
 615		dev->if_port = yp->default_port;
 616
 617	netif_start_queue(dev);
 618
 619	/* Setting the Rx mode will start the Rx process. */
 620	if (yp->drv_flags & IsGigabit) {
 621		/* We are always in full-duplex mode with gigabit! */
 622		yp->full_duplex = 1;
 623		iowrite16(0x01CF, ioaddr + Cnfg);
 624	} else {
 625		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 626		iowrite16(0x1018, ioaddr + FrameGap1);
 627		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 628	}
 629	set_rx_mode(dev);
 630
 631	/* Enable interrupts by setting the interrupt mask. */
 632	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 633	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 634	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 635	iowrite32(0x80008000, ioaddr + TxCtrl);
 636
 637	if (yellowfin_debug > 2) {
 638		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 639	}
 640
 641	/* Set the timer to check for link beat. */
 642	timer_setup(&yp->timer, yellowfin_timer, 0);
 643	yp->timer.expires = jiffies + 3*HZ;
 644	add_timer(&yp->timer);
 645out:
 646	return rc;
 647
 648err_free_irq:
 649	free_irq(irq, dev);
 650	goto out;
 651}
 652
 653static void yellowfin_timer(struct timer_list *t)
 654{
 655	struct yellowfin_private *yp = from_timer(yp, t, timer);
 656	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
 657	void __iomem *ioaddr = yp->base;
 658	int next_tick = 60*HZ;
 659
 660	if (yellowfin_debug > 3) {
 661		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 662			      ioread16(ioaddr + IntrStatus));
 663	}
 664
 665	if (yp->mii_cnt) {
 666		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 667		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 668		int negotiated = lpa & yp->advertising;
 669		if (yellowfin_debug > 1)
 670			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 671				      yp->phys[0], bmsr, lpa);
 672
 673		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 674
 675		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 676
 677		if (bmsr & BMSR_LSTATUS)
 678			next_tick = 60*HZ;
 679		else
 680			next_tick = 3*HZ;
 681	}
 682
 683	yp->timer.expires = jiffies + next_tick;
 684	add_timer(&yp->timer);
 685}
 686
 687static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
 688{
 689	struct yellowfin_private *yp = netdev_priv(dev);
 690	void __iomem *ioaddr = yp->base;
 691
 692	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 693		    yp->cur_tx, yp->dirty_tx,
 694		    ioread32(ioaddr + TxStatus),
 695		    ioread32(ioaddr + RxStatus));
 696
 697	/* Note: these should be KERN_DEBUG. */
 698	if (yellowfin_debug) {
 699		int i;
 700		pr_warn("  Rx ring %p: ", yp->rx_ring);
 701		for (i = 0; i < RX_RING_SIZE; i++)
 702			pr_cont(" %08x", yp->rx_ring[i].result_status);
 703		pr_cont("\n");
 704		pr_warn("  Tx ring %p: ", yp->tx_ring);
 705		for (i = 0; i < TX_RING_SIZE; i++)
 706			pr_cont(" %04x /%08x",
 707			       yp->tx_status[i].tx_errs,
 708			       yp->tx_ring[i].result_status);
 709		pr_cont("\n");
 710	}
 711
 712	/* If the hardware is found to hang regularly, we will update the code
 713	   to reinitialize the chip here. */
 714	dev->if_port = 0;
 715
 716	/* Wake the potentially-idle transmit channel. */
 717	iowrite32(0x10001000, yp->base + TxCtrl);
 718	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 719		netif_wake_queue (dev);		/* Typical path */
 720
 721	netif_trans_update(dev); /* prevent tx timeout */
 722	dev->stats.tx_errors++;
 723}
 724
 725/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 726static int yellowfin_init_ring(struct net_device *dev)
 727{
 728	struct yellowfin_private *yp = netdev_priv(dev);
 729	int i, j;
 730
 731	yp->tx_full = 0;
 732	yp->cur_rx = yp->cur_tx = 0;
 733	yp->dirty_tx = 0;
 734
 735	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 736
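	/* Pre-fill every Rx descriptor command word and chain each descriptor to
	   the next one (the modulo wraps the last entry back to the first); the
	   buffer addresses are filled in when the skbuffs are allocated below. */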
 737	for (i = 0; i < RX_RING_SIZE; i++) {
 738		yp->rx_ring[i].dbdma_cmd =
 739			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 740		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 741			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 742	}
 743
 744	for (i = 0; i < RX_RING_SIZE; i++) {
 745		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 746		yp->rx_skbuff[i] = skb;
 747		if (skb == NULL)
 748			break;
 749		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 750		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 751								 skb->data,
 752								 yp->rx_buf_sz,
 753								 DMA_FROM_DEVICE));
 754	}
 755	if (i != RX_RING_SIZE) {
 756		for (j = 0; j < i; j++)
 757			dev_kfree_skb(yp->rx_skbuff[j]);
 758		return -ENOMEM;
 759	}
 760	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 761	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 762
 763#define NO_TXSTATS
 764#ifdef NO_TXSTATS
 765	/* In this mode the Tx ring needs only a single descriptor. */
 766	for (i = 0; i < TX_RING_SIZE; i++) {
 767		yp->tx_skbuff[i] = NULL;
 768		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 769		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 770			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 771	}
 772	/* Wrap ring */
 773	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 774#else
 775{
 776	/* Tx ring needs a pair of descriptors, the second for the status. */
 777	for (i = 0; i < TX_RING_SIZE; i++) {
 778		j = 2*i;
 779		yp->tx_skbuff[i] = 0;
 780		/* Branch on Tx error. */
 781		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 782		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 783			(j+1)*sizeof(struct yellowfin_desc));
 784		j++;
 785		if (yp->drv_flags & FullTxStatus) {
 786			yp->tx_ring[j].dbdma_cmd =
 787				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 788			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 789			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 790				i*sizeof(struct tx_status_words));
 791		} else {
 792			/* Symbios chips write only tx_errs word. */
 793			yp->tx_ring[j].dbdma_cmd =
 794				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 795			yp->tx_ring[j].request_cnt = 2;
 796			/* Om pade ummmmm... */
 797			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 798				i*sizeof(struct tx_status_words) +
 799				&(yp->tx_status[0].tx_errs) -
 800				&(yp->tx_status[0]));
 801		}
 802		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 803			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 804	}
 805	/* Wrap ring */
 806	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 807}
 808#endif
 809	yp->tx_tail_desc = &yp->tx_status[0];
 810	return 0;
 811}
 812
 813static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 814					struct net_device *dev)
 815{
 816	struct yellowfin_private *yp = netdev_priv(dev);
 817	unsigned entry;
 818	int len = skb->len;
 819
 820	netif_stop_queue (dev);
 821
 822	/* Note: Ordering is important here, set the field with the
 823	   "ownership" bit last, and only then increment cur_tx. */
 824
 825	/* Calculate the next Tx descriptor entry. */
 826	entry = yp->cur_tx % TX_RING_SIZE;
 827
 828	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 829		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 830		/* Fix GX chipset errata. */
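		/* If the buffer would end within the last 8 bytes of a 32-byte cache
		   line (or exactly on a boundary), pad it so it ends just past the
		   next boundary instead. */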
 831		if (cacheline_end > 24  || cacheline_end == 0) {
 832			len = skb->len + 32 - cacheline_end + 1;
 833			if (skb_padto(skb, len)) {
 834				yp->tx_skbuff[entry] = NULL;
 835				netif_wake_queue(dev);
 836				return NETDEV_TX_OK;
 837			}
 838		}
 839	}
 840	yp->tx_skbuff[entry] = skb;
 841
 842#ifdef NO_TXSTATS
 843	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 844							     skb->data,
 845							     len, DMA_TO_DEVICE));
 846	yp->tx_ring[entry].result_status = 0;
 847	if (entry >= TX_RING_SIZE-1) {
 848		/* New stop command. */
 849		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 850		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 851			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 852	} else {
 853		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 854		yp->tx_ring[entry].dbdma_cmd =
 855			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 856	}
 857	yp->cur_tx++;
 858#else
 859	yp->tx_ring[entry<<1].request_cnt = len;
 860	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 861								skb->data,
 862								len, DMA_TO_DEVICE));
 863	/* The input_last (status-write) command is constant, but we must
 864	   rewrite the subsequent 'stop' command. */
 865
 866	yp->cur_tx++;
 867	{
 868		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 869		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 870	}
 871	/* Final step -- overwrite the old 'stop' command. */
 872
 873	yp->tx_ring[entry<<1].dbdma_cmd =
 874		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 875					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 876#endif
 877
 878	/* Non-x86 Todo: explicitly flush cache lines here. */
 879
 880	/* Wake the potentially-idle transmit channel. */
 881	iowrite32(0x10001000, yp->base + TxCtrl);
 882
 883	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 884		netif_start_queue (dev);		/* Typical path */
 885	else
 886		yp->tx_full = 1;
 887
 888	if (yellowfin_debug > 4) {
 889		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 890			      yp->cur_tx, entry);
 891	}
 892	return NETDEV_TX_OK;
 893}
 894
 895/* The interrupt handler does all of the Rx thread work and cleans up
 896   after the Tx thread. */
 897static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 898{
 899	struct net_device *dev = dev_instance;
 900	struct yellowfin_private *yp;
 901	void __iomem *ioaddr;
 902	int boguscnt = max_interrupt_work;
 903	unsigned int handled = 0;
 904
 905	yp = netdev_priv(dev);
 906	ioaddr = yp->base;
 907
 908	spin_lock (&yp->lock);
 909
 910	do {
 911		u16 intr_status = ioread16(ioaddr + IntrClear);
 912
 913		if (yellowfin_debug > 4)
 914			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 915				      intr_status);
 916
 917		if (intr_status == 0)
 918			break;
 919		handled = 1;
 920
 921		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 922			yellowfin_rx(dev);
 923			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 924		}
 925
 926#ifdef NO_TXSTATS
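		/* Reclaim completed Tx descriptors: the chip writes a non-zero
		   result_status once a frame has gone out, so stop at the first
		   entry that is still zero. */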
 927		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 928			int entry = yp->dirty_tx % TX_RING_SIZE;
 929			struct sk_buff *skb;
 930
 931			if (yp->tx_ring[entry].result_status == 0)
 932				break;
 933			skb = yp->tx_skbuff[entry];
 934			dev->stats.tx_packets++;
 935			dev->stats.tx_bytes += skb->len;
 936			/* Free the original skb. */
 937			dma_unmap_single(&yp->pci_dev->dev,
 938					 le32_to_cpu(yp->tx_ring[entry].addr),
 939					 skb->len, DMA_TO_DEVICE);
 940			dev_consume_skb_irq(skb);
 941			yp->tx_skbuff[entry] = NULL;
 942		}
 943		if (yp->tx_full &&
 944		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 945			/* The ring is no longer full, clear tbusy. */
 946			yp->tx_full = 0;
 947			netif_wake_queue(dev);
 948		}
 949#else
 950		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 951			unsigned dirty_tx = yp->dirty_tx;
 952
 953			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 954				 dirty_tx++) {
 955				/* Todo: optimize this. */
 956				int entry = dirty_tx % TX_RING_SIZE;
 957				u16 tx_errs = yp->tx_status[entry].tx_errs;
 958				struct sk_buff *skb;
 959
 960#ifndef final_version
 961				if (yellowfin_debug > 5)
 962					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 963						      entry,
 964						      yp->tx_status[entry].tx_cnt,
 965						      yp->tx_status[entry].tx_errs,
 966						      yp->tx_status[entry].total_tx_cnt,
 967						      yp->tx_status[entry].paused);
 968#endif
 969				if (tx_errs == 0)
 970					break;	/* It still hasn't been Txed */
 971				skb = yp->tx_skbuff[entry];
 972				if (tx_errs & 0xF810) {
 973					/* There was a major error; log it. */
 974#ifndef final_version
 975					if (yellowfin_debug > 1)
 976						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 977							      tx_errs);
 978#endif
 979					dev->stats.tx_errors++;
 980					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 981					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 982					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 983					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 984				} else {
 985#ifndef final_version
 986					if (yellowfin_debug > 4)
 987						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 988							      tx_errs);
 989#endif
 990					dev->stats.tx_bytes += skb->len;
 991					dev->stats.collisions += tx_errs & 15;
 992					dev->stats.tx_packets++;
 993				}
 994				/* Free the original skb. */
 995				dma_unmap_single(&yp->pci_dev->dev,
 996						 yp->tx_ring[entry << 1].addr,
 997						 skb->len, DMA_TO_DEVICE);
 998				dev_consume_skb_irq(skb);
 999				yp->tx_skbuff[entry] = 0;
1000				/* Mark status as empty. */
1001				yp->tx_status[entry].tx_errs = 0;
1002			}
1003
1004#ifndef final_version
1005			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1006				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1007					   dirty_tx, yp->cur_tx, yp->tx_full);
1008				dirty_tx += TX_RING_SIZE;
1009			}
1010#endif
1011
1012			if (yp->tx_full &&
1013			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1014				/* The ring is no longer full, clear tbusy. */
1015				yp->tx_full = 0;
1016				netif_wake_queue(dev);
1017			}
1018
1019			yp->dirty_tx = dirty_tx;
1020			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1021		}
1022#endif
1023
1024		/* Log errors and other uncommon events. */
1025		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1026			yellowfin_error(dev, intr_status);
1027
1028		if (--boguscnt < 0) {
1029			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1030				    intr_status);
1031			break;
1032		}
1033	} while (1);
1034
1035	if (yellowfin_debug > 3)
1036		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1037			      ioread16(ioaddr + IntrStatus));
1038
1039	spin_unlock (&yp->lock);
1040	return IRQ_RETVAL(handled);
1041}
1042
1043/* This routine is logically part of the interrupt handler, but separated
1044   for clarity and better register allocation. */
1045static int yellowfin_rx(struct net_device *dev)
1046{
1047	struct yellowfin_private *yp = netdev_priv(dev);
1048	int entry = yp->cur_rx % RX_RING_SIZE;
1049	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1050
1051	if (yellowfin_debug > 4) {
1052		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1053			   entry, yp->rx_ring[entry].result_status);
1054		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1055			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1056			   yp->rx_ring[entry].result_status);
1057	}
1058
1059	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1060	while (1) {
1061		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1062		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1063		s16 frame_status;
1064		u16 desc_status;
1065		int data_size, __maybe_unused yf_size;
1066		u8 *buf_addr;
1067
1068		if(!desc->result_status)
1069			break;
1070		dma_sync_single_for_cpu(&yp->pci_dev->dev,
1071					le32_to_cpu(desc->addr),
1072					yp->rx_buf_sz, DMA_FROM_DEVICE);
1073		desc_status = le32_to_cpu(desc->result_status) >> 16;
1074		buf_addr = rx_skb->data;
1075		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1076			le32_to_cpu(desc->result_status)) & 0xffff;
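		/* The chip appends a status word to the end of the received data;
		   pick it up from the last two bytes of the buffer. */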
1077		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1078		if (yellowfin_debug > 4)
1079			printk(KERN_DEBUG "  %s() status was %04x\n",
1080			       __func__, frame_status);
1081		if (--boguscnt < 0)
1082			break;
1083
1084		yf_size = sizeof(struct yellowfin_desc);
1085
1086		if ( ! (desc_status & RX_EOP)) {
1087			if (data_size != 0)
1088				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1089					    desc_status, data_size);
1090			dev->stats.rx_length_errors++;
1091		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1092			/* There was an error. */
1093			if (yellowfin_debug > 3)
1094				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1095				       __func__, frame_status);
1096			dev->stats.rx_errors++;
1097			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1098			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1099			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1100			if (frame_status < 0) dev->stats.rx_dropped++;
1101		} else if ( !(yp->drv_flags & IsGigabit)  &&
1102				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1103			u8 status1 = buf_addr[data_size-2];
1104			u8 status2 = buf_addr[data_size-1];
1105			dev->stats.rx_errors++;
1106			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1107			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1108			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1109			if (status2 & 0x80) dev->stats.rx_dropped++;
1110#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1111		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1112			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1113						      entry * yf_size),
1114					  dev->dev_addr) &&
1115			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1116						      entry * yf_size),
1117					  "\377\377\377\377\377\377")) {
1118			if (bogus_rx++ == 0)
1119				netdev_warn(dev, "Bad frame to %pM\n",
1120					    buf_addr);
1121#endif
1122		} else {
1123			struct sk_buff *skb;
1124			int pkt_len = data_size -
1125				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1126			/* To verify: Yellowfin Length should omit the CRC! */
1127
1128#ifndef final_version
1129			if (yellowfin_debug > 4)
1130				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1131				       __func__, pkt_len, data_size, boguscnt);
1132#endif
1133			/* Check if the packet is long enough to just pass up the skbuff
1134			   without copying to a properly sized skbuff. */
1135			if (pkt_len > rx_copybreak) {
1136				skb_put(skb = rx_skb, pkt_len);
1137				dma_unmap_single(&yp->pci_dev->dev,
1138						 le32_to_cpu(yp->rx_ring[entry].addr),
1139						 yp->rx_buf_sz,
1140						 DMA_FROM_DEVICE);
1141				yp->rx_skbuff[entry] = NULL;
1142			} else {
1143				skb = netdev_alloc_skb(dev, pkt_len + 2);
1144				if (skb == NULL)
1145					break;
1146				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1147				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1148				skb_put(skb, pkt_len);
1149				dma_sync_single_for_device(&yp->pci_dev->dev,
1150							   le32_to_cpu(desc->addr),
1151							   yp->rx_buf_sz,
1152							   DMA_FROM_DEVICE);
1153			}
1154			skb->protocol = eth_type_trans(skb, dev);
1155			netif_rx(skb);
1156			dev->stats.rx_packets++;
1157			dev->stats.rx_bytes += pkt_len;
1158		}
1159		entry = (++yp->cur_rx) % RX_RING_SIZE;
1160	}
1161
1162	/* Refill the Rx ring buffers. */
1163	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1164		entry = yp->dirty_rx % RX_RING_SIZE;
1165		if (yp->rx_skbuff[entry] == NULL) {
1166			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1167			if (skb == NULL)
1168				break;				/* Better luck next round. */
1169			yp->rx_skbuff[entry] = skb;
1170			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1171			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
1172									     skb->data,
1173									     yp->rx_buf_sz,
1174									     DMA_FROM_DEVICE));
1175		}
1176		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1177		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1178		if (entry != 0)
1179			yp->rx_ring[entry - 1].dbdma_cmd =
1180				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1181		else
1182			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1183				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1184							| yp->rx_buf_sz);
1185	}
1186
1187	return 0;
1188}
1189
1190static void yellowfin_error(struct net_device *dev, int intr_status)
1191{
1192	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1193	/* Hmmmmm, it's not clear what to do here. */
1194	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1195		dev->stats.tx_errors++;
1196	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1197		dev->stats.rx_errors++;
1198}
1199
1200static int yellowfin_close(struct net_device *dev)
1201{
1202	struct yellowfin_private *yp = netdev_priv(dev);
1203	void __iomem *ioaddr = yp->base;
1204	int i;
1205
1206	netif_stop_queue (dev);
1207
1208	if (yellowfin_debug > 1) {
1209		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1210			      ioread16(ioaddr + TxStatus),
1211			      ioread16(ioaddr + RxStatus),
1212			      ioread16(ioaddr + IntrStatus));
1213		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1214			      yp->cur_tx, yp->dirty_tx,
1215			      yp->cur_rx, yp->dirty_rx);
1216	}
1217
1218	/* Disable interrupts by clearing the interrupt mask. */
1219	iowrite16(0x0000, ioaddr + IntrEnb);
1220
1221	/* Stop the chip's Tx and Rx processes. */
1222	iowrite32(0x80000000, ioaddr + RxCtrl);
1223	iowrite32(0x80000000, ioaddr + TxCtrl);
1224
1225	del_timer(&yp->timer);
1226
1227#if defined(__i386__)
1228	if (yellowfin_debug > 2) {
1229		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1230				(unsigned long long)yp->tx_ring_dma);
1231		for (i = 0; i < TX_RING_SIZE*2; i++)
1232			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1233				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1234				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1235				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1236		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1237		for (i = 0; i < TX_RING_SIZE; i++)
1238			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1239				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1240				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1241
1242		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1243				(unsigned long long)yp->rx_ring_dma);
1244		for (i = 0; i < RX_RING_SIZE; i++) {
1245			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1246				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1247				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1248				   yp->rx_ring[i].result_status);
1249			if (yellowfin_debug > 6) {
1250				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1251					int j;
1252
1253					printk(KERN_DEBUG);
1254					for (j = 0; j < 0x50; j++)
1255						pr_cont(" %04x",
1256							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1257					pr_cont("\n");
1258				}
1259			}
1260		}
1261	}
1262#endif /* __i386__ debugging only */
1263
1264	free_irq(yp->pci_dev->irq, dev);
1265
1266	/* Free all the skbuffs in the Rx queue. */
1267	for (i = 0; i < RX_RING_SIZE; i++) {
1268		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1269		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1270		if (yp->rx_skbuff[i]) {
1271			dev_kfree_skb(yp->rx_skbuff[i]);
1272		}
1273		yp->rx_skbuff[i] = NULL;
1274	}
1275	for (i = 0; i < TX_RING_SIZE; i++) {
1276		dev_kfree_skb(yp->tx_skbuff[i]);
1277		yp->tx_skbuff[i] = NULL;
1278	}
1279
1280#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1281	if (yellowfin_debug > 0) {
1282		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1283			      bogus_rx);
1284	}
1285#endif
1286
1287	return 0;
1288}
1289
1290/* Set or clear the multicast filter for this adaptor. */
1291
1292static void set_rx_mode(struct net_device *dev)
1293{
1294	struct yellowfin_private *yp = netdev_priv(dev);
1295	void __iomem *ioaddr = yp->base;
1296	u16 cfg_value = ioread16(ioaddr + Cnfg);
1297
1298	/* Stop the Rx process to change any value. */
1299	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1300	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1301		iowrite16(0x000F, ioaddr + AddrMode);
1302	} else if ((netdev_mc_count(dev) > 64) ||
1303		   (dev->flags & IFF_ALLMULTI)) {
1304		/* Too many to filter well, or accept all multicasts. */
1305		iowrite16(0x000B, ioaddr + AddrMode);
1306	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1307		struct netdev_hw_addr *ha;
1308		u16 hash_table[4];
1309		int i;
1310
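		/* Build a 64-entry hash filter: bits 3-8 of the little-endian CRC of
		   each address select one of 64 filter bits, stored in the four
		   16-bit HashTbl registers. */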
1311		memset(hash_table, 0, sizeof(hash_table));
1312		netdev_for_each_mc_addr(ha, dev) {
1313			unsigned int bit;
1314
1315			/* Due to a bug in the early chip versions, multiple filter
1316			   slots must be set for each address. */
1317			if (yp->drv_flags & HasMulticastBug) {
1318				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1319				hash_table[bit >> 4] |= (1 << bit);
1320				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1321				hash_table[bit >> 4] |= (1 << bit);
1322				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1323				hash_table[bit >> 4] |= (1 << bit);
1324			}
1325			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1326			hash_table[bit >> 4] |= (1 << bit);
1327		}
1328		/* Copy the hash table to the chip. */
1329		for (i = 0; i < 4; i++)
1330			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1331		iowrite16(0x0003, ioaddr + AddrMode);
1332	} else {					/* Normal, unicast/broadcast-only mode. */
1333		iowrite16(0x0001, ioaddr + AddrMode);
1334	}
1335	/* Restart the Rx process. */
1336	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1337}
1338
1339static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1340{
1341	struct yellowfin_private *np = netdev_priv(dev);
1342
1343	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1344	strscpy(info->version, DRV_VERSION, sizeof(info->version));
1345	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1346}
1347
1348static const struct ethtool_ops ethtool_ops = {
1349	.get_drvinfo = yellowfin_get_drvinfo
1350};
1351
1352static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1353{
1354	struct yellowfin_private *np = netdev_priv(dev);
1355	void __iomem *ioaddr = np->base;
1356	struct mii_ioctl_data *data = if_mii(rq);
1357
1358	switch(cmd) {
1359	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1360		data->phy_id = np->phys[0] & 0x1f;
1361		fallthrough;
1362
1363	case SIOCGMIIREG:		/* Read MII PHY register. */
1364		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1365		return 0;
1366
1367	case SIOCSMIIREG:		/* Write MII PHY register. */
1368		if (data->phy_id == np->phys[0]) {
1369			u16 value = data->val_in;
1370			switch (data->reg_num) {
1371			case 0:
1372				/* Check for autonegotiation on or reset. */
1373				np->medialock = (value & 0x9000) ? 0 : 1;
1374				if (np->medialock)
1375					np->full_duplex = (value & 0x0100) ? 1 : 0;
1376				break;
1377			case 4: np->advertising = value; break;
1378			}
1379			/* Perhaps check_duplex(dev), depending on chip semantics. */
1380		}
1381		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1382		return 0;
1383	default:
1384		return -EOPNOTSUPP;
1385	}
1386}
1387
1388
1389static void yellowfin_remove_one(struct pci_dev *pdev)
1390{
1391	struct net_device *dev = pci_get_drvdata(pdev);
1392	struct yellowfin_private *np;
1393
1394	BUG_ON(!dev);
1395	np = netdev_priv(dev);
1396
1397	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
1398			  np->tx_status_dma);
1399	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
1400			  np->rx_ring_dma);
1401	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
1402			  np->tx_ring_dma);
1403	unregister_netdev (dev);
1404
1405	pci_iounmap(pdev, np->base);
1406
1407	pci_release_regions (pdev);
1408
1409	free_netdev (dev);
1410}
1411
1412
1413static struct pci_driver yellowfin_driver = {
1414	.name		= DRV_NAME,
1415	.id_table	= yellowfin_pci_tbl,
1416	.probe		= yellowfin_init_one,
1417	.remove		= yellowfin_remove_one,
1418};
1419
1420
1421static int __init yellowfin_init (void)
1422{
1423/* when a module, this is printed whether or not devices are found in probe */
1424#ifdef MODULE
1425	printk(version);
1426#endif
1427	return pci_register_driver(&yellowfin_driver);
1428}
1429
1430
1431static void __exit yellowfin_cleanup (void)
1432{
1433	pci_unregister_driver (&yellowfin_driver);
1434}
1435
1436
1437module_init(yellowfin_init);
1438module_exit(yellowfin_cleanup);
v4.10.11
   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
  166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
 172
  173IIIc. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  182queue slot is empty, it clears the tbusy flag when finished; otherwise it
  183sets the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
  194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
  214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
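/* In dbdma_cmd the high nibble selects the DBDMA opcode, bits 16-21 hold the
   branch/interrupt/wait condition selects, and (judging by how the driver
   uses them below) the low 16 bits carry the transfer byte count ORed in. */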
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(unsigned long data);
 347static void yellowfin_tx_timeout(struct net_device *dev);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_do_ioctl 		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 380        void *ring_space;
 381        dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387
 388/* when built into the kernel, we only print version if device is found */
 389#ifndef MODULE
 390	static int printed_version;
 391	if (!printed_version++)
 392		printk(version);
 393#endif
 394
 395	i = pci_enable_device(pdev);
 396	if (i) return i;
 397
 398	dev = alloc_etherdev(sizeof(*np));
 399	if (!dev)
 400		return -ENOMEM;
 401
 402	SET_NETDEV_DEV(dev, &pdev->dev);
 403
 404	np = netdev_priv(dev);
 405
 406	if (pci_request_regions(pdev, DRV_NAME))
 407		goto err_out_free_netdev;
 408
 409	pci_set_master (pdev);
 410
 411	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 412	if (!ioaddr)
 413		goto err_out_free_res;
 414
 415	irq = pdev->irq;
 416
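	/* With DontUseEeprom the station address is read back from the StnAddr
	   registers (presumably left there by the boot firmware); otherwise it
	   comes from the serial EEPROM, at offset 0x100 when offset 6 reads 0xff. */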
 417	if (drv_flags & DontUseEeprom)
 418		for (i = 0; i < 6; i++)
 419			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 420	else {
 421		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 422		for (i = 0; i < 6; i++)
 423			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 424	}
 425
 426	/* Reset the chip. */
 427	iowrite32(0x80000000, ioaddr + DMACtrl);
 428
 429	pci_set_drvdata(pdev, dev);
 430	spin_lock_init(&np->lock);
 431
 432	np->pci_dev = pdev;
 433	np->chip_id = chip_idx;
 434	np->drv_flags = drv_flags;
 435	np->base = ioaddr;
 436
 437	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 438	if (!ring_space)
 439		goto err_out_cleardev;
 440	np->tx_ring = ring_space;
 441	np->tx_ring_dma = ring_dma;
 442
 443	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 444	if (!ring_space)
 445		goto err_out_unmap_tx;
 446	np->rx_ring = ring_space;
 447	np->rx_ring_dma = ring_dma;
 448
 449	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
 450	if (!ring_space)
 451		goto err_out_unmap_rx;
 452	np->tx_status = ring_space;
 453	np->tx_status_dma = ring_dma;
 454
 455	if (dev->mem_start)
 456		option = dev->mem_start;
 457
 458	/* The lower four bits are the media type. */
 459	if (option > 0) {
 460		if (option & 0x200)
 461			np->full_duplex = 1;
 462		np->default_port = option & 15;
 463		if (np->default_port)
 464			np->medialock = 1;
 465	}
 466	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 467		np->full_duplex = 1;
 468
 469	if (np->full_duplex)
 470		np->duplex_lock = 1;
 471
 472	/* The Yellowfin-specific entries in the device structure. */
 473	dev->netdev_ops = &netdev_ops;
 474	dev->ethtool_ops = &ethtool_ops;
 475	dev->watchdog_timeo = TX_TIMEOUT;
 476
 477	if (mtu)
 478		dev->mtu = mtu;
 479
 480	i = register_netdev(dev);
 481	if (i)
 482		goto err_out_unmap_status;
 483
 484	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 485		    pci_id_tbl[chip_idx].name,
 486		    ioread32(ioaddr + ChipRev), ioaddr,
 487		    dev->dev_addr, irq);
 488
 489	if (np->drv_flags & HasMII) {
 490		int phy, phy_idx = 0;
 491		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 492			int mii_status = mdio_read(ioaddr, phy, 1);
 493			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 494				np->phys[phy_idx++] = phy;
 495				np->advertising = mdio_read(ioaddr, phy, 4);
 496				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 497					    phy, mii_status, np->advertising);
 498			}
 499		}
 500		np->mii_cnt = phy_idx;
 501	}
 502
 503	find_cnt++;
 504
 505	return 0;
 506
 507err_out_unmap_status:
 508        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
 509		np->tx_status_dma);
 510err_out_unmap_rx:
 511        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 512err_out_unmap_tx:
 513        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 514err_out_cleardev:
 515	pci_iounmap(pdev, ioaddr);
 516err_out_free_res:
 517	pci_release_regions(pdev);
 518err_out_free_netdev:
 519	free_netdev (dev);
 520	return -ENODEV;
 521}
 522
 523static int read_eeprom(void __iomem *ioaddr, int location)
 524{
  525	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
 526
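	/* Write the low address byte to EEAddr, then a command byte (0x30,
	   presumably the read opcode) carrying address bits 8-10 to EECtrl,
	   and poll the EEStatus busy bit before reading the result from EERead. */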
 527	iowrite8(location, ioaddr + EEAddr);
 528	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 529	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 530		;
 531	return ioread8(ioaddr + EERead);
 532}
 533
  534/* MII Management Data I/O accesses.
 535   These routines assume the MDIO controller is idle, and do not exit until
 536   the command is finished. */
 537
 538static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 539{
 540	int i;
 541
 542	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 543	iowrite16(1, ioaddr + MII_Cmd);
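	/* Writing 1 to MII_Cmd starts the read cycle; bit 0 of MII_Status stays
	   set until the management interface is idle again. */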
 544	for (i = 10000; i >= 0; i--)
 545		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 546			break;
 547	return ioread16(ioaddr + MII_Rd_Data);
 548}
 549
 550static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 551{
 552	int i;
 553
 554	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 555	iowrite16(value, ioaddr + MII_Wr_Data);
 556
 557	/* Wait for the command to finish. */
 558	for (i = 10000; i >= 0; i--)
 559		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 560			break;
 561}
 562
 563
 564static int yellowfin_open(struct net_device *dev)
 565{
 566	struct yellowfin_private *yp = netdev_priv(dev);
 567	const int irq = yp->pci_dev->irq;
 568	void __iomem *ioaddr = yp->base;
 569	int i, rc;
 570
 571	/* Reset the chip. */
 572	iowrite32(0x80000000, ioaddr + DMACtrl);
 573
 574	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 575	if (rc)
 576		return rc;
 577
 578	rc = yellowfin_init_ring(dev);
 579	if (rc < 0)
 580		goto err_free_irq;
 581
 582	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 583	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 584
 585	for (i = 0; i < 6; i++)
 586		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 587
 588	/* Set up various condition 'select' registers.
 589	   There are no options here. */
 590	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 591	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 592	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 593	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 594	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 595	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 596
  597	/* Initialize other registers: with so many, this will eventually be
  598	   converted to an offset/value list. */
 599	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 600	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 601	/* Enable automatic generation of flow control frames, period 0xffff. */
 602	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 603
 604	yp->tx_threshold = 32;
 605	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 606
 607	if (dev->if_port == 0)
 608		dev->if_port = yp->default_port;
 609
 610	netif_start_queue(dev);
 611
 612	/* Setting the Rx mode will start the Rx process. */
 613	if (yp->drv_flags & IsGigabit) {
 614		/* We are always in full-duplex mode with gigabit! */
 615		yp->full_duplex = 1;
 616		iowrite16(0x01CF, ioaddr + Cnfg);
 617	} else {
 618		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 619		iowrite16(0x1018, ioaddr + FrameGap1);
 620		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 621	}
 622	set_rx_mode(dev);
 623
 624	/* Enable interrupts by setting the interrupt mask. */
 625	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 626	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 627	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 628	iowrite32(0x80008000, ioaddr + TxCtrl);
 629
 630	if (yellowfin_debug > 2) {
 631		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 632	}
 633
 634	/* Set the timer to check for link beat. */
 635	init_timer(&yp->timer);
 636	yp->timer.expires = jiffies + 3*HZ;
 637	yp->timer.data = (unsigned long)dev;
 638	yp->timer.function = yellowfin_timer;				/* timer handler */
 639	add_timer(&yp->timer);
 640out:
 641	return rc;
 642
 643err_free_irq:
 644	free_irq(irq, dev);
 645	goto out;
 646}
 647
 648static void yellowfin_timer(unsigned long data)
 649{
 650	struct net_device *dev = (struct net_device *)data;
 651	struct yellowfin_private *yp = netdev_priv(dev);
 652	void __iomem *ioaddr = yp->base;
 653	int next_tick = 60*HZ;
 654
 655	if (yellowfin_debug > 3) {
 656		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 657			      ioread16(ioaddr + IntrStatus));
 658	}
 659
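	/* If an MII PHY is present, re-check duplex against the link partner's
	   advertisement; poll again in 60 seconds while BMSR reports link, or
	   in 3 seconds while the link is down. */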
 660	if (yp->mii_cnt) {
 661		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 662		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 663		int negotiated = lpa & yp->advertising;
 664		if (yellowfin_debug > 1)
 665			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 666				      yp->phys[0], bmsr, lpa);
 667
 668		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 669
 670		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 671
 672		if (bmsr & BMSR_LSTATUS)
 673			next_tick = 60*HZ;
 674		else
 675			next_tick = 3*HZ;
 676	}
 677
 678	yp->timer.expires = jiffies + next_tick;
 679	add_timer(&yp->timer);
 680}
 681
 682static void yellowfin_tx_timeout(struct net_device *dev)
 683{
 684	struct yellowfin_private *yp = netdev_priv(dev);
 685	void __iomem *ioaddr = yp->base;
 686
 687	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 688		    yp->cur_tx, yp->dirty_tx,
 689		    ioread32(ioaddr + TxStatus),
 690		    ioread32(ioaddr + RxStatus));
 691
 692	/* Note: these should be KERN_DEBUG. */
 693	if (yellowfin_debug) {
 694		int i;
 695		pr_warn("  Rx ring %p: ", yp->rx_ring);
 696		for (i = 0; i < RX_RING_SIZE; i++)
 697			pr_cont(" %08x", yp->rx_ring[i].result_status);
 698		pr_cont("\n");
 699		pr_warn("  Tx ring %p: ", yp->tx_ring);
 700		for (i = 0; i < TX_RING_SIZE; i++)
 701			pr_cont(" %04x /%08x",
 702			       yp->tx_status[i].tx_errs,
 703			       yp->tx_ring[i].result_status);
 704		pr_cont("\n");
 705	}
 706
 707	/* If the hardware is found to hang regularly, we will update the code
 708	   to reinitialize the chip here. */
 709	dev->if_port = 0;
 710
 711	/* Wake the potentially-idle transmit channel. */
 712	iowrite32(0x10001000, yp->base + TxCtrl);
 713	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 714		netif_wake_queue (dev);		/* Typical path */
 715
 716	netif_trans_update(dev); /* prevent tx timeout */
 717	dev->stats.tx_errors++;
 718}
 719
 720/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 721static int yellowfin_init_ring(struct net_device *dev)
 722{
 723	struct yellowfin_private *yp = netdev_priv(dev);
 724	int i, j;
 725
 726	yp->tx_full = 0;
 727	yp->cur_rx = yp->cur_tx = 0;
 728	yp->dirty_tx = 0;
 729
 730	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 731
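	/* Chain the Rx descriptors into a ring: each branch_addr points at the
	   next descriptor and the last entry wraps back to the first. */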
 732	for (i = 0; i < RX_RING_SIZE; i++) {
 733		yp->rx_ring[i].dbdma_cmd =
 734			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 735		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 736			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 737	}
 738
 739	for (i = 0; i < RX_RING_SIZE; i++) {
 740		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 741		yp->rx_skbuff[i] = skb;
 742		if (skb == NULL)
 743			break;
 744		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 745		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 746			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 747	}
 748	if (i != RX_RING_SIZE) {
 749		for (j = 0; j < i; j++)
 750			dev_kfree_skb(yp->rx_skbuff[j]);
 751		return -ENOMEM;
 752	}
 753	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 754	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 755
 756#define NO_TXSTATS
 757#ifdef NO_TXSTATS
 758	/* In this mode the Tx ring needs only a single descriptor. */
 759	for (i = 0; i < TX_RING_SIZE; i++) {
 760		yp->tx_skbuff[i] = NULL;
 761		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 762		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 763			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 764	}
 765	/* Wrap ring */
 766	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 767#else
 768{
 769	/* Tx ring needs a pair of descriptors, the second for the status. */
 770	for (i = 0; i < TX_RING_SIZE; i++) {
 771		j = 2*i;
 772		yp->tx_skbuff[i] = 0;
 773		/* Branch on Tx error. */
 774		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 775		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 776			(j+1)*sizeof(struct yellowfin_desc));
 777		j++;
  778		if (yp->drv_flags & FullTxStatus) {
 779			yp->tx_ring[j].dbdma_cmd =
 780				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 781			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 782			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 783				i*sizeof(struct tx_status_words));
 784		} else {
 785			/* Symbios chips write only tx_errs word. */
 786			yp->tx_ring[j].dbdma_cmd =
 787				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 788			yp->tx_ring[j].request_cnt = 2;
 789			/* Om pade ummmmm... */
 790			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 791				i*sizeof(struct tx_status_words) +
 792				&(yp->tx_status[0].tx_errs) -
 793				&(yp->tx_status[0]));
 794		}
 795		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 796			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 797	}
 798	/* Wrap ring */
  799	yp->tx_ring[j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 800}
 801#endif
 802	yp->tx_tail_desc = &yp->tx_status[0];
 803	return 0;
 804}
 805
 806static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 807					struct net_device *dev)
 808{
 809	struct yellowfin_private *yp = netdev_priv(dev);
 810	unsigned entry;
 811	int len = skb->len;
 812
 813	netif_stop_queue (dev);
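	/* Stop the queue unconditionally; it is re-awakened near the bottom of
	   this function if the ring still has room. */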
 814
 815	/* Note: Ordering is important here, set the field with the
 816	   "ownership" bit last, and only then increment cur_tx. */
 817
 818	/* Calculate the next Tx descriptor entry. */
 819	entry = yp->cur_tx % TX_RING_SIZE;
 820
 821	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 822		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 823		/* Fix GX chipset errata. */
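		/* Pad so the buffer does not end in the last few bytes of a
		   32-byte cache line, nor exactly on a boundary, which the GX
		   server chipset apparently mishandles. */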
 824		if (cacheline_end > 24  || cacheline_end == 0) {
 825			len = skb->len + 32 - cacheline_end + 1;
 826			if (skb_padto(skb, len)) {
 827				yp->tx_skbuff[entry] = NULL;
 828				netif_wake_queue(dev);
 829				return NETDEV_TX_OK;
 830			}
 831		}
 832	}
 833	yp->tx_skbuff[entry] = skb;
 834
 835#ifdef NO_TXSTATS
 836	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 837		skb->data, len, PCI_DMA_TODEVICE));
 838	yp->tx_ring[entry].result_status = 0;
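	/* Write a fresh CMD_STOP into the *next* slot before arming this one,
	   so the DMA engine always runs onto a stop descriptor after sending
	   the frame. */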
 839	if (entry >= TX_RING_SIZE-1) {
 840		/* New stop command. */
 841		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 842		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 843			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 844	} else {
 845		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 846		yp->tx_ring[entry].dbdma_cmd =
 847			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 848	}
 849	yp->cur_tx++;
 850#else
 851	yp->tx_ring[entry<<1].request_cnt = len;
 852	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 853		skb->data, len, PCI_DMA_TODEVICE));
 854	/* The input_last (status-write) command is constant, but we must
 855	   rewrite the subsequent 'stop' command. */
 856
 857	yp->cur_tx++;
 858	{
 859		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 860		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 861	}
 862	/* Final step -- overwrite the old 'stop' command. */
 863
 864	yp->tx_ring[entry<<1].dbdma_cmd =
 865		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 866					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 867#endif
 868
 869	/* Non-x86 Todo: explicitly flush cache lines here. */
 870
 871	/* Wake the potentially-idle transmit channel. */
 872	iowrite32(0x10001000, yp->base + TxCtrl);
 873
 874	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 875		netif_start_queue (dev);		/* Typical path */
 876	else
 877		yp->tx_full = 1;
 878
 879	if (yellowfin_debug > 4) {
 880		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 881			      yp->cur_tx, entry);
 882	}
 883	return NETDEV_TX_OK;
 884}
 885
 886/* The interrupt handler does all of the Rx thread work and cleans up
 887   after the Tx thread. */
 888static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 889{
 890	struct net_device *dev = dev_instance;
 891	struct yellowfin_private *yp;
 892	void __iomem *ioaddr;
 893	int boguscnt = max_interrupt_work;
 894	unsigned int handled = 0;
 895
 896	yp = netdev_priv(dev);
 897	ioaddr = yp->base;
 898
 899	spin_lock (&yp->lock);
 900
 901	do {
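		/* Reading IntrClear (rather than IntrStatus) returns the pending
		   interrupt sources and, per the register's name, acknowledges
		   them in the same access. */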
 902		u16 intr_status = ioread16(ioaddr + IntrClear);
 903
 904		if (yellowfin_debug > 4)
 905			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 906				      intr_status);
 907
 908		if (intr_status == 0)
 909			break;
 910		handled = 1;
 911
 912		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 913			yellowfin_rx(dev);
 914			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 915		}
 916
 917#ifdef NO_TXSTATS
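		/* Reap completed transmits: a non-zero result_status means the
		   chip is done with the descriptor, so unmap the buffer, free
		   the skb and advance dirty_tx. */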
 918		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 919			int entry = yp->dirty_tx % TX_RING_SIZE;
 920			struct sk_buff *skb;
 921
 922			if (yp->tx_ring[entry].result_status == 0)
 923				break;
 924			skb = yp->tx_skbuff[entry];
 925			dev->stats.tx_packets++;
 926			dev->stats.tx_bytes += skb->len;
 927			/* Free the original skb. */
 928			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
 929				skb->len, PCI_DMA_TODEVICE);
 930			dev_kfree_skb_irq(skb);
 931			yp->tx_skbuff[entry] = NULL;
 932		}
 933		if (yp->tx_full &&
 934		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 935			/* The ring is no longer full, clear tbusy. */
 936			yp->tx_full = 0;
 937			netif_wake_queue(dev);
 938		}
 939#else
 940		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 941			unsigned dirty_tx = yp->dirty_tx;
 942
 943			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 944				 dirty_tx++) {
 945				/* Todo: optimize this. */
 946				int entry = dirty_tx % TX_RING_SIZE;
 947				u16 tx_errs = yp->tx_status[entry].tx_errs;
 948				struct sk_buff *skb;
 949
 950#ifndef final_version
 951				if (yellowfin_debug > 5)
 952					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 953						      entry,
 954						      yp->tx_status[entry].tx_cnt,
 955						      yp->tx_status[entry].tx_errs,
 956						      yp->tx_status[entry].total_tx_cnt,
 957						      yp->tx_status[entry].paused);
 958#endif
 959				if (tx_errs == 0)
 960					break;	/* It still hasn't been Txed */
 961				skb = yp->tx_skbuff[entry];
 962				if (tx_errs & 0xF810) {
  963					/* There was a major error, log it. */
 964#ifndef final_version
 965					if (yellowfin_debug > 1)
 966						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 967							      tx_errs);
 968#endif
 969					dev->stats.tx_errors++;
 970					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 971					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 972					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 973					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 974				} else {
 975#ifndef final_version
 976					if (yellowfin_debug > 4)
 977						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 978							      tx_errs);
 979#endif
 980					dev->stats.tx_bytes += skb->len;
 981					dev->stats.collisions += tx_errs & 15;
 982					dev->stats.tx_packets++;
 983				}
 984				/* Free the original skb. */
 985				pci_unmap_single(yp->pci_dev,
 986					yp->tx_ring[entry<<1].addr, skb->len,
 987					PCI_DMA_TODEVICE);
 988				dev_kfree_skb_irq(skb);
 989				yp->tx_skbuff[entry] = 0;
 990				/* Mark status as empty. */
 991				yp->tx_status[entry].tx_errs = 0;
 992			}
 993
 994#ifndef final_version
 995			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
 996				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
 997					   dirty_tx, yp->cur_tx, yp->tx_full);
 998				dirty_tx += TX_RING_SIZE;
 999			}
1000#endif
1001
1002			if (yp->tx_full &&
1003			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1004				/* The ring is no longer full, clear tbusy. */
1005				yp->tx_full = 0;
1006				netif_wake_queue(dev);
1007			}
1008
1009			yp->dirty_tx = dirty_tx;
1010			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1011		}
1012#endif
1013
1014		/* Log errors and other uncommon events. */
1015		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1016			yellowfin_error(dev, intr_status);
1017
1018		if (--boguscnt < 0) {
1019			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1020				    intr_status);
1021			break;
1022		}
1023	} while (1);
1024
1025	if (yellowfin_debug > 3)
1026		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1027			      ioread16(ioaddr + IntrStatus));
1028
1029	spin_unlock (&yp->lock);
1030	return IRQ_RETVAL(handled);
1031}
1032
1033/* This routine is logically part of the interrupt handler, but separated
1034   for clarity and better register allocation. */
1035static int yellowfin_rx(struct net_device *dev)
1036{
1037	struct yellowfin_private *yp = netdev_priv(dev);
1038	int entry = yp->cur_rx % RX_RING_SIZE;
1039	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1040
1041	if (yellowfin_debug > 4) {
1042		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1043			   entry, yp->rx_ring[entry].result_status);
1044		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1045			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1046			   yp->rx_ring[entry].result_status);
1047	}
1048
1049	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1050	while (1) {
1051		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1052		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1053		s16 frame_status;
1054		u16 desc_status;
1055		int data_size, yf_size;
1056		u8 *buf_addr;
1057
1058		if(!desc->result_status)
1059			break;
1060		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1061			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1062		desc_status = le32_to_cpu(desc->result_status) >> 16;
1063		buf_addr = rx_skb->data;
1064		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1065			le32_to_cpu(desc->result_status)) & 0xffff;
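		/* data_size is the requested length minus what appears to be a
		   residual count in result_status; the chip leaves a 16-bit
		   status word in the last two bytes of the received data, which
		   frame_status picks up below. */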
1066		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1067		if (yellowfin_debug > 4)
1068			printk(KERN_DEBUG "  %s() status was %04x\n",
1069			       __func__, frame_status);
1070		if (--boguscnt < 0)
1071			break;
1072
1073		yf_size = sizeof(struct yellowfin_desc);
1074
1075		if ( ! (desc_status & RX_EOP)) {
1076			if (data_size != 0)
1077				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1078					    desc_status, data_size);
1079			dev->stats.rx_length_errors++;
1080		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
 1081			/* There was an error. */
1082			if (yellowfin_debug > 3)
1083				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1084				       __func__, frame_status);
1085			dev->stats.rx_errors++;
1086			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1087			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1088			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1089			if (frame_status < 0) dev->stats.rx_dropped++;
1090		} else if ( !(yp->drv_flags & IsGigabit)  &&
1091				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1092			u8 status1 = buf_addr[data_size-2];
1093			u8 status2 = buf_addr[data_size-1];
1094			dev->stats.rx_errors++;
1095			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1096			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1097			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1098			if (status2 & 0x80) dev->stats.rx_dropped++;
1099#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
 1100		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1101			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1102						      entry * yf_size),
1103					  dev->dev_addr) &&
1104			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1105						      entry * yf_size),
1106					  "\377\377\377\377\377\377")) {
1107			if (bogus_rx++ == 0)
1108				netdev_warn(dev, "Bad frame to %pM\n",
1109					    buf_addr);
1110#endif
1111		} else {
1112			struct sk_buff *skb;
1113			int pkt_len = data_size -
1114				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1115			/* To verify: Yellowfin Length should omit the CRC! */
1116
1117#ifndef final_version
1118			if (yellowfin_debug > 4)
1119				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1120				       __func__, pkt_len, data_size, boguscnt);
1121#endif
1122			/* Check if the packet is long enough to just pass up the skbuff
1123			   without copying to a properly sized skbuff. */
1124			if (pkt_len > rx_copybreak) {
1125				skb_put(skb = rx_skb, pkt_len);
1126				pci_unmap_single(yp->pci_dev,
1127					le32_to_cpu(yp->rx_ring[entry].addr),
1128					yp->rx_buf_sz,
1129					PCI_DMA_FROMDEVICE);
1130				yp->rx_skbuff[entry] = NULL;
1131			} else {
1132				skb = netdev_alloc_skb(dev, pkt_len + 2);
1133				if (skb == NULL)
1134					break;
1135				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1136				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1137				skb_put(skb, pkt_len);
1138				pci_dma_sync_single_for_device(yp->pci_dev,
1139								le32_to_cpu(desc->addr),
1140								yp->rx_buf_sz,
1141								PCI_DMA_FROMDEVICE);
1142			}
1143			skb->protocol = eth_type_trans(skb, dev);
1144			netif_rx(skb);
1145			dev->stats.rx_packets++;
1146			dev->stats.rx_bytes += pkt_len;
1147		}
1148		entry = (++yp->cur_rx) % RX_RING_SIZE;
1149	}
1150
1151	/* Refill the Rx ring buffers. */
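	/* Each refilled slot is rewritten as CMD_STOP and the slot before it is
	   re-armed as a receive command, so the Rx engine always halts on the
	   most recently refilled descriptor. */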
1152	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1153		entry = yp->dirty_rx % RX_RING_SIZE;
1154		if (yp->rx_skbuff[entry] == NULL) {
1155			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1156			if (skb == NULL)
1157				break;				/* Better luck next round. */
1158			yp->rx_skbuff[entry] = skb;
1159			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1160			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1161				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1162		}
1163		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1164		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1165		if (entry != 0)
1166			yp->rx_ring[entry - 1].dbdma_cmd =
1167				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1168		else
1169			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1170				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1171							| yp->rx_buf_sz);
1172	}
1173
1174	return 0;
1175}
1176
1177static void yellowfin_error(struct net_device *dev, int intr_status)
1178{
1179	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1180	/* Hmmmmm, it's not clear what to do here. */
1181	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1182		dev->stats.tx_errors++;
1183	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1184		dev->stats.rx_errors++;
1185}
1186
1187static int yellowfin_close(struct net_device *dev)
1188{
1189	struct yellowfin_private *yp = netdev_priv(dev);
1190	void __iomem *ioaddr = yp->base;
1191	int i;
1192
1193	netif_stop_queue (dev);
1194
1195	if (yellowfin_debug > 1) {
1196		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1197			      ioread16(ioaddr + TxStatus),
1198			      ioread16(ioaddr + RxStatus),
1199			      ioread16(ioaddr + IntrStatus));
1200		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1201			      yp->cur_tx, yp->dirty_tx,
1202			      yp->cur_rx, yp->dirty_rx);
1203	}
1204
1205	/* Disable interrupts by clearing the interrupt mask. */
1206	iowrite16(0x0000, ioaddr + IntrEnb);
1207
1208	/* Stop the chip's Tx and Rx processes. */
1209	iowrite32(0x80000000, ioaddr + RxCtrl);
1210	iowrite32(0x80000000, ioaddr + TxCtrl);
1211
1212	del_timer(&yp->timer);
1213
1214#if defined(__i386__)
1215	if (yellowfin_debug > 2) {
1216		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1217				(unsigned long long)yp->tx_ring_dma);
1218		for (i = 0; i < TX_RING_SIZE*2; i++)
1219			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1220				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1221				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1222				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1223		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1224		for (i = 0; i < TX_RING_SIZE; i++)
1225			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1226				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1227				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1228
1229		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1230				(unsigned long long)yp->rx_ring_dma);
1231		for (i = 0; i < RX_RING_SIZE; i++) {
1232			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1233				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1234				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1235				   yp->rx_ring[i].result_status);
1236			if (yellowfin_debug > 6) {
1237				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1238					int j;
1239
1240					printk(KERN_DEBUG);
1241					for (j = 0; j < 0x50; j++)
1242						pr_cont(" %04x",
1243							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1244					pr_cont("\n");
1245				}
1246			}
1247		}
1248	}
1249#endif /* __i386__ debugging only */
1250
1251	free_irq(yp->pci_dev->irq, dev);
1252
1253	/* Free all the skbuffs in the Rx queue. */
1254	for (i = 0; i < RX_RING_SIZE; i++) {
1255		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1256		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1257		if (yp->rx_skbuff[i]) {
1258			dev_kfree_skb(yp->rx_skbuff[i]);
1259		}
1260		yp->rx_skbuff[i] = NULL;
1261	}
1262	for (i = 0; i < TX_RING_SIZE; i++) {
1263		if (yp->tx_skbuff[i])
1264			dev_kfree_skb(yp->tx_skbuff[i]);
1265		yp->tx_skbuff[i] = NULL;
1266	}
1267
1268#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1269	if (yellowfin_debug > 0) {
1270		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1271			      bogus_rx);
1272	}
1273#endif
1274
1275	return 0;
1276}
1277
1278/* Set or clear the multicast filter for this adaptor. */
1279
1280static void set_rx_mode(struct net_device *dev)
1281{
1282	struct yellowfin_private *yp = netdev_priv(dev);
1283	void __iomem *ioaddr = yp->base;
1284	u16 cfg_value = ioread16(ioaddr + Cnfg);
1285
1286	/* Stop the Rx process to change any value. */
1287	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1288	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1289		iowrite16(0x000F, ioaddr + AddrMode);
1290	} else if ((netdev_mc_count(dev) > 64) ||
1291		   (dev->flags & IFF_ALLMULTI)) {
1292		/* Too many to filter well, or accept all multicasts. */
1293		iowrite16(0x000B, ioaddr + AddrMode);
1294	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1295		struct netdev_hw_addr *ha;
1296		u16 hash_table[4];
1297		int i;
1298
1299		memset(hash_table, 0, sizeof(hash_table));
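		/* Hash each address with ether_crc_le() and use CRC bits 3-8 as
		   a 6-bit index into the 64-entry filter, stored as four 16-bit
		   HashTbl registers. */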
1300		netdev_for_each_mc_addr(ha, dev) {
1301			unsigned int bit;
1302
1303			/* Due to a bug in the early chip versions, multiple filter
1304			   slots must be set for each address. */
1305			if (yp->drv_flags & HasMulticastBug) {
 1306				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
 1307				hash_table[bit >> 4] |= (1 << (bit & 15));
 1308				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
 1309				hash_table[bit >> 4] |= (1 << (bit & 15));
 1310				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
 1311				hash_table[bit >> 4] |= (1 << (bit & 15));
 1312			}
 1313			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
 1314			hash_table[bit >> 4] |= (1 << (bit & 15));
1315		}
1316		/* Copy the hash table to the chip. */
1317		for (i = 0; i < 4; i++)
1318			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1319		iowrite16(0x0003, ioaddr + AddrMode);
1320	} else {					/* Normal, unicast/broadcast-only mode. */
1321		iowrite16(0x0001, ioaddr + AddrMode);
1322	}
1323	/* Restart the Rx process. */
1324	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1325}
1326
1327static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1328{
1329	struct yellowfin_private *np = netdev_priv(dev);
1330
1331	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1332	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1333	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1334}
1335
1336static const struct ethtool_ops ethtool_ops = {
1337	.get_drvinfo = yellowfin_get_drvinfo
1338};
1339
1340static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1341{
1342	struct yellowfin_private *np = netdev_priv(dev);
1343	void __iomem *ioaddr = np->base;
1344	struct mii_ioctl_data *data = if_mii(rq);
1345
1346	switch(cmd) {
1347	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1348		data->phy_id = np->phys[0] & 0x1f;
1349		/* Fall Through */
1350
1351	case SIOCGMIIREG:		/* Read MII PHY register. */
1352		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1353		return 0;
1354
1355	case SIOCSMIIREG:		/* Write MII PHY register. */
1356		if (data->phy_id == np->phys[0]) {
1357			u16 value = data->val_in;
1358			switch (data->reg_num) {
1359			case 0:
1360				/* Check for autonegotiation on or reset. */
1361				np->medialock = (value & 0x9000) ? 0 : 1;
1362				if (np->medialock)
1363					np->full_duplex = (value & 0x0100) ? 1 : 0;
1364				break;
1365			case 4: np->advertising = value; break;
1366			}
1367			/* Perhaps check_duplex(dev), depending on chip semantics. */
1368		}
1369		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1370		return 0;
1371	default:
1372		return -EOPNOTSUPP;
1373	}
1374}
1375
1376
1377static void yellowfin_remove_one(struct pci_dev *pdev)
1378{
1379	struct net_device *dev = pci_get_drvdata(pdev);
1380	struct yellowfin_private *np;
1381
1382	BUG_ON(!dev);
1383	np = netdev_priv(dev);
1384
1385        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1386		np->tx_status_dma);
1387	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1388	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1389	unregister_netdev (dev);
1390
1391	pci_iounmap(pdev, np->base);
1392
1393	pci_release_regions (pdev);
1394
1395	free_netdev (dev);
1396}
1397
1398
1399static struct pci_driver yellowfin_driver = {
1400	.name		= DRV_NAME,
1401	.id_table	= yellowfin_pci_tbl,
1402	.probe		= yellowfin_init_one,
1403	.remove		= yellowfin_remove_one,
1404};
1405
1406
1407static int __init yellowfin_init (void)
1408{
1409/* when a module, this is printed whether or not devices are found in probe */
1410#ifdef MODULE
1411	printk(version);
1412#endif
1413	return pci_register_driver(&yellowfin_driver);
1414}
1415
1416
1417static void __exit yellowfin_cleanup (void)
1418{
1419	pci_unregister_driver (&yellowfin_driver);
1420}
1421
1422
1423module_init(yellowfin_init);
1424module_exit(yellowfin_cleanup);