   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
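/* Quick arithmetic check (assuming the structure layouts defined further
   down): struct yellowfin_desc is four 32-bit words = 16 bytes and
   struct tx_status_words is four 16-bit words = 8 bytes, so with the
   defaults above TX_TOTAL_SIZE = 2*16*16 = 512 bytes, RX_TOTAL_SIZE =
   64*16 = 1024 bytes and STATUS_TOTAL_SIZE = 16*8 = 128 bytes. */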
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
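/* Example (hypothetical invocation, not part of this file): the values above
   are ordinary module parameters, so they can be set at load time with
   something like "modprobe yellowfin debug=3 rx_copybreak=200". */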
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
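As a concrete illustration (see yellowfin_init_ring() below), descriptor i's
branch_addr points at descriptor (i+1) % RING_SIZE, so the final entry's
branch returns to the ring base and the chip can follow the list endlessly.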
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
 165
  166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
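In code terms (see yellowfin_rx() below): when pkt_len > rx_copybreak the
full-sized skbuff is passed up the stack and a replacement is allocated in
the refill loop; otherwise the frame is copied into a freshly allocated
skbuff of pkt_len bytes and the original buffer stays in the ring.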
 172
  173IIIb. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  182	queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
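Concretely, yellowfin_start_xmit() below stops the queue before queuing a
frame and restarts it only while cur_tx - dirty_tx < TX_QUEUE_SIZE; the
interrupt handler wakes the queue again once enough Tx entries are reaped.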
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
  194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
  214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
 223        const char *name;
 224        struct match_info {
 225                int     pci, pci_mask, subsystem, subsystem_mask;
 226                int revision, revision_mask;                            /* Only 8 bits. */
 227        } id;
 228        int drv_flags;                          /* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
 234	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
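/* A descriptor command word is the OR of one CMD_* opcode, optional control
   bits and a 16-bit byte count, e.g. CMD_TX_PKT | BRANCH_IFTRUE | len in
   yellowfin_start_xmit() or CMD_RX_BUF | INTR_ALWAYS | rx_buf_sz for Rx. */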
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(struct timer_list *t);
 347static void yellowfin_tx_timeout(struct net_device *dev);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_do_ioctl 		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
 380        void *ring_space;
 381        dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387
 388/* when built into the kernel, we only print version if device is found */
 389#ifndef MODULE
 390	static int printed_version;
 391	if (!printed_version++)
 392		printk(version);
 393#endif
 394
 395	i = pci_enable_device(pdev);
 396	if (i) return i;
 397
 398	dev = alloc_etherdev(sizeof(*np));
 399	if (!dev)
 400		return -ENOMEM;
 401
 402	SET_NETDEV_DEV(dev, &pdev->dev);
 403
 404	np = netdev_priv(dev);
 405
 406	if (pci_request_regions(pdev, DRV_NAME))
 407		goto err_out_free_netdev;
 408
 409	pci_set_master (pdev);
 410
 411	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 412	if (!ioaddr)
 413		goto err_out_free_res;
 414
 415	irq = pdev->irq;
 416
 417	if (drv_flags & DontUseEeprom)
 418		for (i = 0; i < 6; i++)
 419			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 420	else {
 421		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 422		for (i = 0; i < 6; i++)
 423			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 424	}
 425
 426	/* Reset the chip. */
 427	iowrite32(0x80000000, ioaddr + DMACtrl);
 428
 429	pci_set_drvdata(pdev, dev);
 430	spin_lock_init(&np->lock);
 431
 432	np->pci_dev = pdev;
 433	np->chip_id = chip_idx;
 434	np->drv_flags = drv_flags;
 435	np->base = ioaddr;
 436
 437	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 438	if (!ring_space)
 439		goto err_out_cleardev;
 440	np->tx_ring = ring_space;
 441	np->tx_ring_dma = ring_dma;
 442
 443	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 444	if (!ring_space)
 445		goto err_out_unmap_tx;
 446	np->rx_ring = ring_space;
 447	np->rx_ring_dma = ring_dma;
 448
 449	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
 450	if (!ring_space)
 451		goto err_out_unmap_rx;
 452	np->tx_status = ring_space;
 453	np->tx_status_dma = ring_dma;
 454
 455	if (dev->mem_start)
 456		option = dev->mem_start;
 457
 458	/* The lower four bits are the media type. */
 459	if (option > 0) {
 460		if (option & 0x200)
 461			np->full_duplex = 1;
 462		np->default_port = option & 15;
 463		if (np->default_port)
 464			np->medialock = 1;
 465	}
 466	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 467		np->full_duplex = 1;
 468
 469	if (np->full_duplex)
 470		np->duplex_lock = 1;
 471
 472	/* The Yellowfin-specific entries in the device structure. */
 473	dev->netdev_ops = &netdev_ops;
 474	dev->ethtool_ops = &ethtool_ops;
 475	dev->watchdog_timeo = TX_TIMEOUT;
 476
 477	if (mtu)
 478		dev->mtu = mtu;
 479
 480	i = register_netdev(dev);
 481	if (i)
 482		goto err_out_unmap_status;
 483
 484	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 485		    pci_id_tbl[chip_idx].name,
 486		    ioread32(ioaddr + ChipRev), ioaddr,
 487		    dev->dev_addr, irq);
 488
 489	if (np->drv_flags & HasMII) {
 490		int phy, phy_idx = 0;
 491		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 492			int mii_status = mdio_read(ioaddr, phy, 1);
 493			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 494				np->phys[phy_idx++] = phy;
 495				np->advertising = mdio_read(ioaddr, phy, 4);
 496				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 497					    phy, mii_status, np->advertising);
 498			}
 499		}
 500		np->mii_cnt = phy_idx;
 501	}
 502
 503	find_cnt++;
 504
 505	return 0;
 506
 507err_out_unmap_status:
 508        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
 509		np->tx_status_dma);
 510err_out_unmap_rx:
 511        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 512err_out_unmap_tx:
 513        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 514err_out_cleardev:
 515	pci_iounmap(pdev, ioaddr);
 516err_out_free_res:
 517	pci_release_regions(pdev);
 518err_out_free_netdev:
 519	free_netdev (dev);
 520	return -ENODEV;
 521}
 522
 523static int read_eeprom(void __iomem *ioaddr, int location)
 524{
  525	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
 526
 527	iowrite8(location, ioaddr + EEAddr);
 528	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 529	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 530		;
 531	return ioread8(ioaddr + EERead);
 532}
 533
  534/* MII Management Data I/O accesses.
 535   These routines assume the MDIO controller is idle, and do not exit until
 536   the command is finished. */
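/* For example, the PHY probe loop in yellowfin_init_one() above calls
   mdio_read(ioaddr, phy, 1) (the MII BMSR register) for each of the 32
   possible PHY addresses to detect attached transceivers. */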
 537
 538static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 539{
 540	int i;
 541
 542	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 543	iowrite16(1, ioaddr + MII_Cmd);
 544	for (i = 10000; i >= 0; i--)
 545		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 546			break;
 547	return ioread16(ioaddr + MII_Rd_Data);
 548}
 549
 550static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 551{
 552	int i;
 553
 554	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 555	iowrite16(value, ioaddr + MII_Wr_Data);
 556
 557	/* Wait for the command to finish. */
 558	for (i = 10000; i >= 0; i--)
 559		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 560			break;
 561}
 562
 563
 564static int yellowfin_open(struct net_device *dev)
 565{
 566	struct yellowfin_private *yp = netdev_priv(dev);
 567	const int irq = yp->pci_dev->irq;
 568	void __iomem *ioaddr = yp->base;
 569	int i, rc;
 570
 571	/* Reset the chip. */
 572	iowrite32(0x80000000, ioaddr + DMACtrl);
 573
 574	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 575	if (rc)
 576		return rc;
 577
 578	rc = yellowfin_init_ring(dev);
 579	if (rc < 0)
 580		goto err_free_irq;
 581
 582	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 583	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 584
 585	for (i = 0; i < 6; i++)
 586		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 587
 588	/* Set up various condition 'select' registers.
 589	   There are no options here. */
 590	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 591	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 592	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 593	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 594	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 595	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 596
  597	/* Initialize other registers: with so many, this will eventually be
  598	   converted to an offset/value list. */
 599	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 600	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 601	/* Enable automatic generation of flow control frames, period 0xffff. */
 602	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 603
 604	yp->tx_threshold = 32;
 605	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 606
 607	if (dev->if_port == 0)
 608		dev->if_port = yp->default_port;
 609
 610	netif_start_queue(dev);
 611
 612	/* Setting the Rx mode will start the Rx process. */
 613	if (yp->drv_flags & IsGigabit) {
 614		/* We are always in full-duplex mode with gigabit! */
 615		yp->full_duplex = 1;
 616		iowrite16(0x01CF, ioaddr + Cnfg);
 617	} else {
 618		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 619		iowrite16(0x1018, ioaddr + FrameGap1);
 620		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 621	}
 622	set_rx_mode(dev);
 623
 624	/* Enable interrupts by setting the interrupt mask. */
 625	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 626	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 627	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 628	iowrite32(0x80008000, ioaddr + TxCtrl);
 629
 630	if (yellowfin_debug > 2) {
 631		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 632	}
 633
 634	/* Set the timer to check for link beat. */
 635	timer_setup(&yp->timer, yellowfin_timer, 0);
 636	yp->timer.expires = jiffies + 3*HZ;
 637	add_timer(&yp->timer);
 638out:
 639	return rc;
 640
 641err_free_irq:
 642	free_irq(irq, dev);
 643	goto out;
 644}
 645
 646static void yellowfin_timer(struct timer_list *t)
 647{
 648	struct yellowfin_private *yp = from_timer(yp, t, timer);
 649	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
 650	void __iomem *ioaddr = yp->base;
 651	int next_tick = 60*HZ;
 652
 653	if (yellowfin_debug > 3) {
 654		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 655			      ioread16(ioaddr + IntrStatus));
 656	}
 657
 658	if (yp->mii_cnt) {
 659		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 660		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 661		int negotiated = lpa & yp->advertising;
 662		if (yellowfin_debug > 1)
 663			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 664				      yp->phys[0], bmsr, lpa);
 665
 666		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 667
 668		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 669
 670		if (bmsr & BMSR_LSTATUS)
 671			next_tick = 60*HZ;
 672		else
 673			next_tick = 3*HZ;
 674	}
 675
 676	yp->timer.expires = jiffies + next_tick;
 677	add_timer(&yp->timer);
 678}
 679
 680static void yellowfin_tx_timeout(struct net_device *dev)
 681{
 682	struct yellowfin_private *yp = netdev_priv(dev);
 683	void __iomem *ioaddr = yp->base;
 684
 685	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 686		    yp->cur_tx, yp->dirty_tx,
 687		    ioread32(ioaddr + TxStatus),
 688		    ioread32(ioaddr + RxStatus));
 689
 690	/* Note: these should be KERN_DEBUG. */
 691	if (yellowfin_debug) {
 692		int i;
 693		pr_warn("  Rx ring %p: ", yp->rx_ring);
 694		for (i = 0; i < RX_RING_SIZE; i++)
 695			pr_cont(" %08x", yp->rx_ring[i].result_status);
 696		pr_cont("\n");
 697		pr_warn("  Tx ring %p: ", yp->tx_ring);
 698		for (i = 0; i < TX_RING_SIZE; i++)
 699			pr_cont(" %04x /%08x",
 700			       yp->tx_status[i].tx_errs,
 701			       yp->tx_ring[i].result_status);
 702		pr_cont("\n");
 703	}
 704
 705	/* If the hardware is found to hang regularly, we will update the code
 706	   to reinitialize the chip here. */
 707	dev->if_port = 0;
 708
 709	/* Wake the potentially-idle transmit channel. */
 710	iowrite32(0x10001000, yp->base + TxCtrl);
 711	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 712		netif_wake_queue (dev);		/* Typical path */
 713
 714	netif_trans_update(dev); /* prevent tx timeout */
 715	dev->stats.tx_errors++;
 716}
 717
 718/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 719static int yellowfin_init_ring(struct net_device *dev)
 720{
 721	struct yellowfin_private *yp = netdev_priv(dev);
 722	int i, j;
 723
 724	yp->tx_full = 0;
 725	yp->cur_rx = yp->cur_tx = 0;
 726	yp->dirty_tx = 0;
 727
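	/* With the default 1500-byte MTU this selects PKT_BUF_SZ (1536 bytes);
	   a larger MTU gets dev->mtu + 32 bytes per Rx buffer. */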
 728	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 729
 730	for (i = 0; i < RX_RING_SIZE; i++) {
 731		yp->rx_ring[i].dbdma_cmd =
 732			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 733		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 734			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 735	}
 736
 737	for (i = 0; i < RX_RING_SIZE; i++) {
 738		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 739		yp->rx_skbuff[i] = skb;
 740		if (skb == NULL)
 741			break;
 742		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 743		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 744			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 745	}
 746	if (i != RX_RING_SIZE) {
 747		for (j = 0; j < i; j++)
 748			dev_kfree_skb(yp->rx_skbuff[j]);
 749		return -ENOMEM;
 750	}
 751	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 752	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 753
 754#define NO_TXSTATS
 755#ifdef NO_TXSTATS
 756	/* In this mode the Tx ring needs only a single descriptor. */
 757	for (i = 0; i < TX_RING_SIZE; i++) {
 758		yp->tx_skbuff[i] = NULL;
 759		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 760		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 761			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 762	}
 763	/* Wrap ring */
 764	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 765#else
 766{
 767	/* Tx ring needs a pair of descriptors, the second for the status. */
 768	for (i = 0; i < TX_RING_SIZE; i++) {
 769		j = 2*i;
 770		yp->tx_skbuff[i] = 0;
 771		/* Branch on Tx error. */
 772		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 773		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 774			(j+1)*sizeof(struct yellowfin_desc));
 775		j++;
  776		if (yp->drv_flags & FullTxStatus) {
 777			yp->tx_ring[j].dbdma_cmd =
 778				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 779			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 780			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 781				i*sizeof(struct tx_status_words));
 782		} else {
 783			/* Symbios chips write only tx_errs word. */
 784			yp->tx_ring[j].dbdma_cmd =
 785				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 786			yp->tx_ring[j].request_cnt = 2;
 787			/* Om pade ummmmm... */
 788			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 789				i*sizeof(struct tx_status_words) +
 790				&(yp->tx_status[0].tx_errs) -
 791				&(yp->tx_status[0]));
 792		}
 793		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 794			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 795	}
 796	/* Wrap ring */
 797	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 798}
 799#endif
 800	yp->tx_tail_desc = &yp->tx_status[0];
 801	return 0;
 802}
 803
 804static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 805					struct net_device *dev)
 806{
 807	struct yellowfin_private *yp = netdev_priv(dev);
 808	unsigned entry;
 809	int len = skb->len;
 810
 811	netif_stop_queue (dev);
 812
 813	/* Note: Ordering is important here, set the field with the
 814	   "ownership" bit last, and only then increment cur_tx. */
 815
 816	/* Calculate the next Tx descriptor entry. */
 817	entry = yp->cur_tx % TX_RING_SIZE;
 818
 819	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 820		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 821		/* Fix GX chipset errata. */
 822		if (cacheline_end > 24  || cacheline_end == 0) {
 823			len = skb->len + 32 - cacheline_end + 1;
 824			if (skb_padto(skb, len)) {
 825				yp->tx_skbuff[entry] = NULL;
 826				netif_wake_queue(dev);
 827				return NETDEV_TX_OK;
 828			}
 829		}
 830	}
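	/* Illustration of the padding arithmetic above: if the frame would end
	   26 bytes into a 32-byte cache line (cacheline_end == 26), len becomes
	   skb->len + 32 - 26 + 1, i.e. 7 bytes of padding, so the frame ends
	   just past the cache line boundary. */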
 831	yp->tx_skbuff[entry] = skb;
 832
 833#ifdef NO_TXSTATS
 834	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 835		skb->data, len, PCI_DMA_TODEVICE));
 836	yp->tx_ring[entry].result_status = 0;
 837	if (entry >= TX_RING_SIZE-1) {
 838		/* New stop command. */
 839		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 840		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 841			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 842	} else {
 843		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 844		yp->tx_ring[entry].dbdma_cmd =
 845			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 846	}
 847	yp->cur_tx++;
 848#else
 849	yp->tx_ring[entry<<1].request_cnt = len;
 850	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 851		skb->data, len, PCI_DMA_TODEVICE));
 852	/* The input_last (status-write) command is constant, but we must
 853	   rewrite the subsequent 'stop' command. */
 854
 855	yp->cur_tx++;
 856	{
 857		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 858		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 859	}
 860	/* Final step -- overwrite the old 'stop' command. */
 861
 862	yp->tx_ring[entry<<1].dbdma_cmd =
 863		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 864					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 865#endif
 866
 867	/* Non-x86 Todo: explicitly flush cache lines here. */
 868
 869	/* Wake the potentially-idle transmit channel. */
 870	iowrite32(0x10001000, yp->base + TxCtrl);
 871
 872	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 873		netif_start_queue (dev);		/* Typical path */
 874	else
 875		yp->tx_full = 1;
 876
 877	if (yellowfin_debug > 4) {
 878		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 879			      yp->cur_tx, entry);
 880	}
 881	return NETDEV_TX_OK;
 882}
 883
 884/* The interrupt handler does all of the Rx thread work and cleans up
 885   after the Tx thread. */
 886static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 887{
 888	struct net_device *dev = dev_instance;
 889	struct yellowfin_private *yp;
 890	void __iomem *ioaddr;
 891	int boguscnt = max_interrupt_work;
 892	unsigned int handled = 0;
 893
 894	yp = netdev_priv(dev);
 895	ioaddr = yp->base;
 896
 897	spin_lock (&yp->lock);
 898
 899	do {
 900		u16 intr_status = ioread16(ioaddr + IntrClear);
 901
 902		if (yellowfin_debug > 4)
 903			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 904				      intr_status);
 905
 906		if (intr_status == 0)
 907			break;
 908		handled = 1;
 909
 910		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 911			yellowfin_rx(dev);
 912			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 913		}
 914
 915#ifdef NO_TXSTATS
 916		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 917			int entry = yp->dirty_tx % TX_RING_SIZE;
 918			struct sk_buff *skb;
 919
 920			if (yp->tx_ring[entry].result_status == 0)
 921				break;
 922			skb = yp->tx_skbuff[entry];
 923			dev->stats.tx_packets++;
 924			dev->stats.tx_bytes += skb->len;
 925			/* Free the original skb. */
 926			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
 927				skb->len, PCI_DMA_TODEVICE);
 928			dev_kfree_skb_irq(skb);
 929			yp->tx_skbuff[entry] = NULL;
 930		}
 931		if (yp->tx_full &&
 932		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 933			/* The ring is no longer full, clear tbusy. */
 934			yp->tx_full = 0;
 935			netif_wake_queue(dev);
 936		}
 937#else
 938		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 939			unsigned dirty_tx = yp->dirty_tx;
 940
 941			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 942				 dirty_tx++) {
 943				/* Todo: optimize this. */
 944				int entry = dirty_tx % TX_RING_SIZE;
 945				u16 tx_errs = yp->tx_status[entry].tx_errs;
 946				struct sk_buff *skb;
 947
 948#ifndef final_version
 949				if (yellowfin_debug > 5)
 950					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 951						      entry,
 952						      yp->tx_status[entry].tx_cnt,
 953						      yp->tx_status[entry].tx_errs,
 954						      yp->tx_status[entry].total_tx_cnt,
 955						      yp->tx_status[entry].paused);
 956#endif
 957				if (tx_errs == 0)
 958					break;	/* It still hasn't been Txed */
 959				skb = yp->tx_skbuff[entry];
 960				if (tx_errs & 0xF810) {
  961					/* There was a major error, log it. */
 962#ifndef final_version
 963					if (yellowfin_debug > 1)
 964						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 965							      tx_errs);
 966#endif
 967					dev->stats.tx_errors++;
 968					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 969					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 970					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 971					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 972				} else {
 973#ifndef final_version
 974					if (yellowfin_debug > 4)
 975						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 976							      tx_errs);
 977#endif
 978					dev->stats.tx_bytes += skb->len;
 979					dev->stats.collisions += tx_errs & 15;
 980					dev->stats.tx_packets++;
 981				}
 982				/* Free the original skb. */
 983				pci_unmap_single(yp->pci_dev,
 984					yp->tx_ring[entry<<1].addr, skb->len,
 985					PCI_DMA_TODEVICE);
 986				dev_kfree_skb_irq(skb);
 987				yp->tx_skbuff[entry] = 0;
 988				/* Mark status as empty. */
 989				yp->tx_status[entry].tx_errs = 0;
 990			}
 991
 992#ifndef final_version
 993			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
 994				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
 995					   dirty_tx, yp->cur_tx, yp->tx_full);
 996				dirty_tx += TX_RING_SIZE;
 997			}
 998#endif
 999
1000			if (yp->tx_full &&
1001			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1002				/* The ring is no longer full, clear tbusy. */
1003				yp->tx_full = 0;
1004				netif_wake_queue(dev);
1005			}
1006
1007			yp->dirty_tx = dirty_tx;
1008			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1009		}
1010#endif
1011
1012		/* Log errors and other uncommon events. */
1013		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1014			yellowfin_error(dev, intr_status);
1015
1016		if (--boguscnt < 0) {
1017			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1018				    intr_status);
1019			break;
1020		}
1021	} while (1);
1022
1023	if (yellowfin_debug > 3)
1024		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1025			      ioread16(ioaddr + IntrStatus));
1026
1027	spin_unlock (&yp->lock);
1028	return IRQ_RETVAL(handled);
1029}
1030
1031/* This routine is logically part of the interrupt handler, but separated
1032   for clarity and better register allocation. */
1033static int yellowfin_rx(struct net_device *dev)
1034{
1035	struct yellowfin_private *yp = netdev_priv(dev);
1036	int entry = yp->cur_rx % RX_RING_SIZE;
1037	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1038
1039	if (yellowfin_debug > 4) {
1040		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1041			   entry, yp->rx_ring[entry].result_status);
1042		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1043			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1044			   yp->rx_ring[entry].result_status);
1045	}
1046
1047	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1048	while (1) {
1049		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1050		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1051		s16 frame_status;
1052		u16 desc_status;
1053		int data_size, yf_size;
1054		u8 *buf_addr;
1055
1056		if(!desc->result_status)
1057			break;
1058		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1059			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1060		desc_status = le32_to_cpu(desc->result_status) >> 16;
1061		buf_addr = rx_skb->data;
1062		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1063			le32_to_cpu(desc->result_status)) & 0xffff;
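		/* data_size above is the posted buffer length (low 16 bits of
		   dbdma_cmd) minus the low 16 bits of result_status, which
		   evidently hold the residual count, i.e. the bytes stored. */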
1064		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1065		if (yellowfin_debug > 4)
1066			printk(KERN_DEBUG "  %s() status was %04x\n",
1067			       __func__, frame_status);
1068		if (--boguscnt < 0)
1069			break;
1070
1071		yf_size = sizeof(struct yellowfin_desc);
1072
1073		if ( ! (desc_status & RX_EOP)) {
1074			if (data_size != 0)
1075				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1076					    desc_status, data_size);
1077			dev->stats.rx_length_errors++;
1078		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
 1079			/* There was an error. */
1080			if (yellowfin_debug > 3)
1081				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1082				       __func__, frame_status);
1083			dev->stats.rx_errors++;
1084			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1085			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1086			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1087			if (frame_status < 0) dev->stats.rx_dropped++;
1088		} else if ( !(yp->drv_flags & IsGigabit)  &&
1089				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1090			u8 status1 = buf_addr[data_size-2];
1091			u8 status2 = buf_addr[data_size-1];
1092			dev->stats.rx_errors++;
1093			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1094			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1095			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1096			if (status2 & 0x80) dev->stats.rx_dropped++;
1097#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
 1098		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1099			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1100						      entry * yf_size),
1101					  dev->dev_addr) &&
1102			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1103						      entry * yf_size),
1104					  "\377\377\377\377\377\377")) {
1105			if (bogus_rx++ == 0)
1106				netdev_warn(dev, "Bad frame to %pM\n",
1107					    buf_addr);
1108#endif
1109		} else {
1110			struct sk_buff *skb;
1111			int pkt_len = data_size -
1112				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1113			/* To verify: Yellowfin Length should omit the CRC! */
1114
1115#ifndef final_version
1116			if (yellowfin_debug > 4)
1117				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1118				       __func__, pkt_len, data_size, boguscnt);
1119#endif
1120			/* Check if the packet is long enough to just pass up the skbuff
1121			   without copying to a properly sized skbuff. */
1122			if (pkt_len > rx_copybreak) {
1123				skb_put(skb = rx_skb, pkt_len);
1124				pci_unmap_single(yp->pci_dev,
1125					le32_to_cpu(yp->rx_ring[entry].addr),
1126					yp->rx_buf_sz,
1127					PCI_DMA_FROMDEVICE);
1128				yp->rx_skbuff[entry] = NULL;
1129			} else {
1130				skb = netdev_alloc_skb(dev, pkt_len + 2);
1131				if (skb == NULL)
1132					break;
1133				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1134				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1135				skb_put(skb, pkt_len);
1136				pci_dma_sync_single_for_device(yp->pci_dev,
1137								le32_to_cpu(desc->addr),
1138								yp->rx_buf_sz,
1139								PCI_DMA_FROMDEVICE);
1140			}
1141			skb->protocol = eth_type_trans(skb, dev);
1142			netif_rx(skb);
1143			dev->stats.rx_packets++;
1144			dev->stats.rx_bytes += pkt_len;
1145		}
1146		entry = (++yp->cur_rx) % RX_RING_SIZE;
1147	}
1148
1149	/* Refill the Rx ring buffers. */
1150	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1151		entry = yp->dirty_rx % RX_RING_SIZE;
1152		if (yp->rx_skbuff[entry] == NULL) {
1153			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1154			if (skb == NULL)
1155				break;				/* Better luck next round. */
1156			yp->rx_skbuff[entry] = skb;
1157			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1158			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1159				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1160		}
1161		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1162		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1163		if (entry != 0)
1164			yp->rx_ring[entry - 1].dbdma_cmd =
1165				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1166		else
1167			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1168				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1169							| yp->rx_buf_sz);
1170	}
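	/* Note on the refill loop above: each refilled slot is first written as
	   a CMD_STOP descriptor and only then is the slot before it re-armed
	   with CMD_RX_BUF, so one stop descriptor always sits at the refill
	   point and the Rx engine cannot lap the driver. */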
1171
1172	return 0;
1173}
1174
1175static void yellowfin_error(struct net_device *dev, int intr_status)
1176{
1177	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1178	/* Hmmmmm, it's not clear what to do here. */
1179	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1180		dev->stats.tx_errors++;
1181	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1182		dev->stats.rx_errors++;
1183}
1184
1185static int yellowfin_close(struct net_device *dev)
1186{
1187	struct yellowfin_private *yp = netdev_priv(dev);
1188	void __iomem *ioaddr = yp->base;
1189	int i;
1190
1191	netif_stop_queue (dev);
1192
1193	if (yellowfin_debug > 1) {
1194		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1195			      ioread16(ioaddr + TxStatus),
1196			      ioread16(ioaddr + RxStatus),
1197			      ioread16(ioaddr + IntrStatus));
1198		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1199			      yp->cur_tx, yp->dirty_tx,
1200			      yp->cur_rx, yp->dirty_rx);
1201	}
1202
1203	/* Disable interrupts by clearing the interrupt mask. */
1204	iowrite16(0x0000, ioaddr + IntrEnb);
1205
1206	/* Stop the chip's Tx and Rx processes. */
1207	iowrite32(0x80000000, ioaddr + RxCtrl);
1208	iowrite32(0x80000000, ioaddr + TxCtrl);
1209
1210	del_timer(&yp->timer);
1211
1212#if defined(__i386__)
1213	if (yellowfin_debug > 2) {
1214		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1215				(unsigned long long)yp->tx_ring_dma);
1216		for (i = 0; i < TX_RING_SIZE*2; i++)
1217			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1218				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1219				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1220				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1221		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1222		for (i = 0; i < TX_RING_SIZE; i++)
1223			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1224				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1225				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1226
1227		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1228				(unsigned long long)yp->rx_ring_dma);
1229		for (i = 0; i < RX_RING_SIZE; i++) {
1230			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1231				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1232				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1233				   yp->rx_ring[i].result_status);
1234			if (yellowfin_debug > 6) {
1235				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1236					int j;
1237
1238					printk(KERN_DEBUG);
1239					for (j = 0; j < 0x50; j++)
1240						pr_cont(" %04x",
1241							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1242					pr_cont("\n");
1243				}
1244			}
1245		}
1246	}
1247#endif /* __i386__ debugging only */
1248
1249	free_irq(yp->pci_dev->irq, dev);
1250
1251	/* Free all the skbuffs in the Rx queue. */
1252	for (i = 0; i < RX_RING_SIZE; i++) {
1253		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1254		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1255		if (yp->rx_skbuff[i]) {
1256			dev_kfree_skb(yp->rx_skbuff[i]);
1257		}
1258		yp->rx_skbuff[i] = NULL;
1259	}
1260	for (i = 0; i < TX_RING_SIZE; i++) {
1261		if (yp->tx_skbuff[i])
1262			dev_kfree_skb(yp->tx_skbuff[i]);
1263		yp->tx_skbuff[i] = NULL;
1264	}
1265
1266#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1267	if (yellowfin_debug > 0) {
1268		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1269			      bogus_rx);
1270	}
1271#endif
1272
1273	return 0;
1274}
1275
1276/* Set or clear the multicast filter for this adaptor. */
1277
1278static void set_rx_mode(struct net_device *dev)
1279{
1280	struct yellowfin_private *yp = netdev_priv(dev);
1281	void __iomem *ioaddr = yp->base;
1282	u16 cfg_value = ioread16(ioaddr + Cnfg);
1283
1284	/* Stop the Rx process to change any value. */
1285	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1286	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1287		iowrite16(0x000F, ioaddr + AddrMode);
1288	} else if ((netdev_mc_count(dev) > 64) ||
1289		   (dev->flags & IFF_ALLMULTI)) {
1290		/* Too many to filter well, or accept all multicasts. */
1291		iowrite16(0x000B, ioaddr + AddrMode);
1292	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1293		struct netdev_hw_addr *ha;
1294		u16 hash_table[4];
1295		int i;
1296
1297		memset(hash_table, 0, sizeof(hash_table));
1298		netdev_for_each_mc_addr(ha, dev) {
1299			unsigned int bit;
1300
1301			/* Due to a bug in the early chip versions, multiple filter
1302			   slots must be set for each address. */
1303			if (yp->drv_flags & HasMulticastBug) {
1304				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1305				hash_table[bit >> 4] |= (1 << bit);
1306				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1307				hash_table[bit >> 4] |= (1 << bit);
1308				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1309				hash_table[bit >> 4] |= (1 << bit);
1310			}
1311			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1312			hash_table[bit >> 4] |= (1 << bit);
1313		}
1314		/* Copy the hash table to the chip. */
1315		for (i = 0; i < 4; i++)
1316			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1317		iowrite16(0x0003, ioaddr + AddrMode);
1318	} else {					/* Normal, unicast/broadcast-only mode. */
1319		iowrite16(0x0001, ioaddr + AddrMode);
1320	}
1321	/* Restart the Rx process. */
1322	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1323}
1324
1325static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1326{
1327	struct yellowfin_private *np = netdev_priv(dev);
1328
1329	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1330	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1331	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1332}
1333
1334static const struct ethtool_ops ethtool_ops = {
1335	.get_drvinfo = yellowfin_get_drvinfo
1336};
1337
1338static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1339{
1340	struct yellowfin_private *np = netdev_priv(dev);
1341	void __iomem *ioaddr = np->base;
1342	struct mii_ioctl_data *data = if_mii(rq);
1343
1344	switch(cmd) {
1345	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1346		data->phy_id = np->phys[0] & 0x1f;
1347		/* Fall Through */
1348
1349	case SIOCGMIIREG:		/* Read MII PHY register. */
1350		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1351		return 0;
1352
1353	case SIOCSMIIREG:		/* Write MII PHY register. */
1354		if (data->phy_id == np->phys[0]) {
1355			u16 value = data->val_in;
1356			switch (data->reg_num) {
1357			case 0:
1358				/* Check for autonegotiation on or reset. */
1359				np->medialock = (value & 0x9000) ? 0 : 1;
1360				if (np->medialock)
1361					np->full_duplex = (value & 0x0100) ? 1 : 0;
1362				break;
1363			case 4: np->advertising = value; break;
1364			}
1365			/* Perhaps check_duplex(dev), depending on chip semantics. */
1366		}
1367		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1368		return 0;
1369	default:
1370		return -EOPNOTSUPP;
1371	}
1372}
1373
1374
1375static void yellowfin_remove_one(struct pci_dev *pdev)
1376{
1377	struct net_device *dev = pci_get_drvdata(pdev);
1378	struct yellowfin_private *np;
1379
1380	BUG_ON(!dev);
1381	np = netdev_priv(dev);
1382
1383        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1384		np->tx_status_dma);
1385	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1386	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1387	unregister_netdev (dev);
1388
1389	pci_iounmap(pdev, np->base);
1390
1391	pci_release_regions (pdev);
1392
1393	free_netdev (dev);
1394}
1395
1396
1397static struct pci_driver yellowfin_driver = {
1398	.name		= DRV_NAME,
1399	.id_table	= yellowfin_pci_tbl,
1400	.probe		= yellowfin_init_one,
1401	.remove		= yellowfin_remove_one,
1402};
1403
1404
1405static int __init yellowfin_init (void)
1406{
1407/* when a module, this is printed whether or not devices are found in probe */
1408#ifdef MODULE
1409	printk(version);
1410#endif
1411	return pci_register_driver(&yellowfin_driver);
1412}
1413
1414
1415static void __exit yellowfin_cleanup (void)
1416{
1417	pci_unregister_driver (&yellowfin_driver);
1418}
1419
1420
1421module_init(yellowfin_init);
1422module_exit(yellowfin_cleanup);
v5.9
   1/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
   2/*
   3	Written 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
  13	It also supports the Symbios Logic version of the same chip core.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Support and updates available at
  21	http://www.scyld.com/network/yellowfin.html
  22	[link no longer provides useful info -jgarzik]
  23
  24*/
  25
  26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  27
  28#define DRV_NAME	"yellowfin"
  29#define DRV_VERSION	"2.1"
  30#define DRV_RELDATE	"Sep 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  37static int max_interrupt_work = 20;
  38static int mtu;
  39#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
  40/* System-wide count of bogus-rx frames. */
  41static int bogus_rx;
  42static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  43static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  44#elif defined(YF_NEW)					/* A future perfect board :->.  */
  45static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
  46static int fifo_cfg = 0x0028;
  47#else
  48static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
  49static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
  50#endif
  51
  52/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  53   Setting to > 1514 effectively disables this feature. */
  54static int rx_copybreak;
  55
  56/* Used to pass the media type, etc.
  57   No media types are currently defined.  These exist for driver
  58   interoperability.
  59*/
  60#define MAX_UNITS 8				/* More are supported, limit only on options */
  61static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  62static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  63
  64/* Do ugly workaround for GX server chipset errata. */
  65static int gx_fix;
  66
  67/* Operational parameters that are set at compile time. */
  68
  69/* Keep the ring sizes a power of two for efficiency.
  70   Making the Tx ring too long decreases the effectiveness of channel
  71   bonding and packet priority.
  72   There are no ill effects from too-large receive rings. */
  73#define TX_RING_SIZE	16
  74#define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
  75#define RX_RING_SIZE	64
  76#define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
  77#define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
  78#define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
  79
  80/* Operational parameters that usually are not changed. */
  81/* Time in jiffies before concluding the transmitter is hung. */
  82#define TX_TIMEOUT  (2*HZ)
  83#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  84
  85#define yellowfin_debug debug
  86
  87#include <linux/module.h>
  88#include <linux/kernel.h>
  89#include <linux/string.h>
  90#include <linux/timer.h>
  91#include <linux/errno.h>
  92#include <linux/ioport.h>
  93#include <linux/interrupt.h>
  94#include <linux/pci.h>
  95#include <linux/init.h>
  96#include <linux/mii.h>
  97#include <linux/netdevice.h>
  98#include <linux/etherdevice.h>
  99#include <linux/skbuff.h>
 100#include <linux/ethtool.h>
 101#include <linux/crc32.h>
 102#include <linux/bitops.h>
 103#include <linux/uaccess.h>
 104#include <asm/processor.h>		/* Processor type for cache alignment. */
 105#include <asm/unaligned.h>
 106#include <asm/io.h>
 107
 108/* These identify the driver base version and may not be removed. */
 109static const char version[] =
 110  KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
 111  "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
 112
 113MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 114MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
 115MODULE_LICENSE("GPL");
 116
 117module_param(max_interrupt_work, int, 0);
 118module_param(mtu, int, 0);
 119module_param(debug, int, 0);
 120module_param(rx_copybreak, int, 0);
 121module_param_array(options, int, NULL, 0);
 122module_param_array(full_duplex, int, NULL, 0);
 123module_param(gx_fix, int, 0);
 124MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
 125MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
 126MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
 127MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
 128MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
 129MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
 130MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
 131
 132/*
 133				Theory of Operation
 134
 135I. Board Compatibility
 136
 137This device driver is designed for the Packet Engines "Yellowfin" Gigabit
 138Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
 139Symbios 53C885E dual function chip.
 140
 141II. Board-specific settings
 142
 143PCI bus devices are configured by the system at boot time, so no jumpers
 144need to be set on the board.  The system BIOS preferably should assign the
 145PCI INTA signal to an otherwise unused system IRQ line.
 146Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 147interrupt lines.
 148
 149III. Driver operation
 150
 151IIIa. Ring buffers
 152
 153The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
 154This is a descriptor list scheme similar to that used by the EEPro100 and
 155Tulip.  This driver uses two statically allocated fixed-size descriptor lists
 156formed into rings by a branch from the final descriptor to the beginning of
 157the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
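
As an aside, a minimal sketch of the wrap-around arithmetic used in
yellowfin_init_ring() to close the rings (each descriptor's branch pointer
simply aims at the next descriptor, modulo the ring size):

	for (i = 0; i < RX_RING_SIZE; i++)
		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
			((i + 1) % RX_RING_SIZE) * sizeof(struct yellowfin_desc));

so the last descriptor branches back to the first and no separate
"end of ring" register is needed.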
 158
 159The driver allocates full frame size skbuffs for the Rx ring buffers at
 160open() time and passes the skb->data field to the Yellowfin as receive data
 161buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
 162a fresh skbuff is allocated and the frame is copied to the new skbuff.
 163When the incoming frame is larger, the skbuff is passed directly up the
 164protocol stack and replaced by a newly allocated skbuff.
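
For illustration only, a condensed sketch of that decision as it appears
in yellowfin_rx() (error handling and DMA syncing omitted):

	if (pkt_len > rx_copybreak) {
		skb_put(skb = rx_skb, pkt_len);
		yp->rx_skbuff[entry] = NULL;
	} else {
		skb = netdev_alloc_skb(dev, pkt_len + 2);
		skb_reserve(skb, 2);
		skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
		skb_put(skb, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

In the first branch the full-sized buffer leaves the ring and the refill
loop later maps a fresh one; in the second the original buffer stays put.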
 165
  166The RX_COPYBREAK value is chosen to trade off the memory wasted by
 167using a full-sized skbuff for small frames vs. the copying costs of larger
 168frames.  For small frames the copying cost is negligible (esp. considering
 169that we are pre-loading the cache with immediately useful header
 170information).  For large frames the copying cost is non-trivial, and the
 171larger copy might flush the cache of useful data.
 172
  173IIIc. Synchronization
 174
 175The driver runs as two independent, single-threaded flows of control.  One
 176is the send-packet routine, which enforces single-threaded use by the
 177dev->tbusy flag.  The other thread is the interrupt handler, which is single
 178threaded by the hardware and other software.
 179
 180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
 181flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
  182queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
 183the 'yp->tx_full' flag.
 184
 185The interrupt handler has exclusive control over the Rx ring and records stats
 186from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
 187empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
 188clears both the tx_full and tbusy flags.
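
In the code below the tbusy flag described above is expressed through the
generic netif_{stop,start,wake}_queue() interface; a condensed sketch of
the producer/consumer handshake (taken from the NO_TXSTATS paths):

	producer, yellowfin_start_xmit():
		yp->cur_tx++;
		if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
			netif_start_queue(dev);
		else
			yp->tx_full = 1;

	consumer, yellowfin_interrupt(), after reaping a Tx status:
		yp->dirty_tx++;
		if (yp->tx_full &&
		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}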
 189
 190IV. Notes
 191
 192Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
 193Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
  194and an AlphaStation to verify the Alpha port!
 195
 196IVb. References
 197
 198Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
 199Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
 200   Data Manual v3.0
 201http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
 202http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
 203
 204IVc. Errata
 205
 206See Packet Engines confidential appendix (prototype chips only).
 207*/
 208
 209
 210
 211enum capability_flags {
 212	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
 213	HasMACAddrBug=32, /* Only on early revs.  */
  214	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
 215};
 216
 217/* The PCI I/O space extent. */
 218enum {
 219	YELLOWFIN_SIZE	= 0x100,
 220};
 221
 222struct pci_id_info {
  223	const char *name;
  224	struct match_info {
  225		int	pci, pci_mask, subsystem, subsystem_mask;
  226		int revision, revision_mask;			/* Only 8 bits. */
  227	} id;
  228	int drv_flags;					/* Driver use, intended as capability flags. */
 229};
 230
 231static const struct pci_id_info pci_id_tbl[] = {
 232	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
 233	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
  234	{"Symbios SYM53C885", { 0x07011000, 0xffffffff},
 235	  HasMII | DontUseEeprom },
 236	{ }
 237};
 238
 239static const struct pci_device_id yellowfin_pci_tbl[] = {
 240	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 241	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 242	{ }
 243};
 244MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
 245
 246
 247/* Offsets to the Yellowfin registers.  Various sizes and alignments. */
 248enum yellowfin_offsets {
 249	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
 250	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
 251	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
 252	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
 253	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
 254	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
 255	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
 256	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
 257	MII_Status=0xAE,
 258	RxDepth=0xB8, FlowCtrl=0xBC,
 259	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
 260	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
 261	EEFeature=0xF5,
 262};
 263
 264/* The Yellowfin Rx and Tx buffer descriptors.
 265   Elements are written as 32 bit for endian portability. */
 266struct yellowfin_desc {
 267	__le32 dbdma_cmd;
 268	__le32 addr;
 269	__le32 branch_addr;
 270	__le32 result_status;
 271};
 272
 273struct tx_status_words {
 274#ifdef __BIG_ENDIAN
 275	u16 tx_errs;
 276	u16 tx_cnt;
 277	u16 paused;
 278	u16 total_tx_cnt;
 279#else  /* Little endian chips. */
 280	u16 tx_cnt;
 281	u16 tx_errs;
 282	u16 total_tx_cnt;
 283	u16 paused;
 284#endif /* __BIG_ENDIAN */
 285};
 286
 287/* Bits in yellowfin_desc.cmd */
 288enum desc_cmd_bits {
 289	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
 290	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
 291	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
 292	BRANCH_IFTRUE=0x040000,
 293};
 294
 295/* Bits in yellowfin_desc.status */
 296enum desc_status_bits { RX_EOP=0x0040, };
 297
 298/* Bits in the interrupt status/mask registers. */
 299enum intr_status_bits {
 300	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
 301	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
 302	IntrEarlyRx=0x100, IntrWakeup=0x200, };
 303
 304#define PRIV_ALIGN	31 	/* Required alignment mask */
 305#define MII_CNT		4
 306struct yellowfin_private {
 307	/* Descriptor rings first for alignment.
 308	   Tx requires a second descriptor for status. */
 309	struct yellowfin_desc *rx_ring;
 310	struct yellowfin_desc *tx_ring;
 311	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 312	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 313	dma_addr_t rx_ring_dma;
 314	dma_addr_t tx_ring_dma;
 315
 316	struct tx_status_words *tx_status;
 317	dma_addr_t tx_status_dma;
 318
 319	struct timer_list timer;	/* Media selection timer. */
 320	/* Frequently used and paired value: keep adjacent for cache effect. */
 321	int chip_id, drv_flags;
 322	struct pci_dev *pci_dev;
 323	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
 324	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 325	struct tx_status_words *tx_tail_desc;
 326	unsigned int cur_tx, dirty_tx;
 327	int tx_threshold;
 328	unsigned int tx_full:1;				/* The Tx queue is full. */
 329	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
 330	unsigned int duplex_lock:1;
 331	unsigned int medialock:1;			/* Do not sense media. */
 332	unsigned int default_port:4;		/* Last dev->if_port value. */
 333	/* MII transceiver section. */
 334	int mii_cnt;						/* MII device addresses. */
 335	u16 advertising;					/* NWay media advertisement */
 336	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
 337	spinlock_t lock;
 338	void __iomem *base;
 339};
 340
 341static int read_eeprom(void __iomem *ioaddr, int location);
 342static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
 343static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
 344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 345static int yellowfin_open(struct net_device *dev);
 346static void yellowfin_timer(struct timer_list *t);
 347static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue);
 348static int yellowfin_init_ring(struct net_device *dev);
 349static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 350					struct net_device *dev);
 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
 352static int yellowfin_rx(struct net_device *dev);
 353static void yellowfin_error(struct net_device *dev, int intr_status);
 354static int yellowfin_close(struct net_device *dev);
 355static void set_rx_mode(struct net_device *dev);
 356static const struct ethtool_ops ethtool_ops;
 357
 358static const struct net_device_ops netdev_ops = {
 359	.ndo_open 		= yellowfin_open,
 360	.ndo_stop 		= yellowfin_close,
 361	.ndo_start_xmit 	= yellowfin_start_xmit,
 362	.ndo_set_rx_mode	= set_rx_mode,
 363	.ndo_validate_addr	= eth_validate_addr,
 364	.ndo_set_mac_address 	= eth_mac_addr,
 365	.ndo_do_ioctl 		= netdev_ioctl,
 366	.ndo_tx_timeout 	= yellowfin_tx_timeout,
 367};
 368
 369static int yellowfin_init_one(struct pci_dev *pdev,
 370			      const struct pci_device_id *ent)
 371{
 372	struct net_device *dev;
 373	struct yellowfin_private *np;
 374	int irq;
 375	int chip_idx = ent->driver_data;
 376	static int find_cnt;
 377	void __iomem *ioaddr;
 378	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 379	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
  380	void *ring_space;
  381	dma_addr_t ring_dma;
 382#ifdef USE_IO_OPS
 383	int bar = 0;
 384#else
 385	int bar = 1;
 386#endif
 387
 388/* when built into the kernel, we only print version if device is found */
 389#ifndef MODULE
 390	static int printed_version;
 391	if (!printed_version++)
 392		printk(version);
 393#endif
 394
 395	i = pci_enable_device(pdev);
 396	if (i) return i;
 397
 398	dev = alloc_etherdev(sizeof(*np));
 399	if (!dev)
 400		return -ENOMEM;
 401
 402	SET_NETDEV_DEV(dev, &pdev->dev);
 403
 404	np = netdev_priv(dev);
 405
 406	if (pci_request_regions(pdev, DRV_NAME))
 407		goto err_out_free_netdev;
 408
 409	pci_set_master (pdev);
 410
 411	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
 412	if (!ioaddr)
 413		goto err_out_free_res;
 414
 415	irq = pdev->irq;
 416
 417	if (drv_flags & DontUseEeprom)
 418		for (i = 0; i < 6; i++)
 419			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
 420	else {
 421		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
 422		for (i = 0; i < 6; i++)
 423			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
 424	}
 425
 426	/* Reset the chip. */
 427	iowrite32(0x80000000, ioaddr + DMACtrl);
 428
 429	pci_set_drvdata(pdev, dev);
 430	spin_lock_init(&np->lock);
 431
 432	np->pci_dev = pdev;
 433	np->chip_id = chip_idx;
 434	np->drv_flags = drv_flags;
 435	np->base = ioaddr;
 436
 437	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
 438					GFP_KERNEL);
 439	if (!ring_space)
 440		goto err_out_cleardev;
 441	np->tx_ring = ring_space;
 442	np->tx_ring_dma = ring_dma;
 443
 444	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
 445					GFP_KERNEL);
 446	if (!ring_space)
 447		goto err_out_unmap_tx;
 448	np->rx_ring = ring_space;
 449	np->rx_ring_dma = ring_dma;
 450
 451	ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
 452					&ring_dma, GFP_KERNEL);
 453	if (!ring_space)
 454		goto err_out_unmap_rx;
 455	np->tx_status = ring_space;
 456	np->tx_status_dma = ring_dma;
 457
 458	if (dev->mem_start)
 459		option = dev->mem_start;
 460
 461	/* The lower four bits are the media type. */
 462	if (option > 0) {
 463		if (option & 0x200)
 464			np->full_duplex = 1;
 465		np->default_port = option & 15;
 466		if (np->default_port)
 467			np->medialock = 1;
 468	}
 469	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
 470		np->full_duplex = 1;
 471
 472	if (np->full_duplex)
 473		np->duplex_lock = 1;
 474
 475	/* The Yellowfin-specific entries in the device structure. */
 476	dev->netdev_ops = &netdev_ops;
 477	dev->ethtool_ops = &ethtool_ops;
 478	dev->watchdog_timeo = TX_TIMEOUT;
 479
 480	if (mtu)
 481		dev->mtu = mtu;
 482
 483	i = register_netdev(dev);
 484	if (i)
 485		goto err_out_unmap_status;
 486
 487	netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
 488		    pci_id_tbl[chip_idx].name,
 489		    ioread32(ioaddr + ChipRev), ioaddr,
 490		    dev->dev_addr, irq);
 491
 492	if (np->drv_flags & HasMII) {
 493		int phy, phy_idx = 0;
 494		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
 495			int mii_status = mdio_read(ioaddr, phy, 1);
 496			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 497				np->phys[phy_idx++] = phy;
 498				np->advertising = mdio_read(ioaddr, phy, 4);
 499				netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
 500					    phy, mii_status, np->advertising);
 501			}
 502		}
 503		np->mii_cnt = phy_idx;
 504	}
 505
 506	find_cnt++;
 507
 508	return 0;
 509
 510err_out_unmap_status:
 511	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
 512			  np->tx_status_dma);
 513err_out_unmap_rx:
 514	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
 515			  np->rx_ring_dma);
 516err_out_unmap_tx:
 517	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
 518			  np->tx_ring_dma);
 519err_out_cleardev:
 520	pci_iounmap(pdev, ioaddr);
 521err_out_free_res:
 522	pci_release_regions(pdev);
 523err_out_free_netdev:
 524	free_netdev (dev);
 525	return -ENODEV;
 526}
 527
 528static int read_eeprom(void __iomem *ioaddr, int location)
 529{
  530	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
 531
 532	iowrite8(location, ioaddr + EEAddr);
 533	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
 534	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
 535		;
 536	return ioread8(ioaddr + EERead);
 537}
 538
  539/* MII Management Data I/O accesses.
 540   These routines assume the MDIO controller is idle, and do not exit until
 541   the command is finished. */
 542
 543static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
 544{
 545	int i;
 546
 547	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 548	iowrite16(1, ioaddr + MII_Cmd);
 549	for (i = 10000; i >= 0; i--)
 550		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 551			break;
 552	return ioread16(ioaddr + MII_Rd_Data);
 553}
 554
 555static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
 556{
 557	int i;
 558
 559	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
 560	iowrite16(value, ioaddr + MII_Wr_Data);
 561
 562	/* Wait for the command to finish. */
 563	for (i = 10000; i >= 0; i--)
 564		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
 565			break;
 566}
 567
 568
 569static int yellowfin_open(struct net_device *dev)
 570{
 571	struct yellowfin_private *yp = netdev_priv(dev);
 572	const int irq = yp->pci_dev->irq;
 573	void __iomem *ioaddr = yp->base;
 574	int i, rc;
 575
 576	/* Reset the chip. */
 577	iowrite32(0x80000000, ioaddr + DMACtrl);
 578
 579	rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
 580	if (rc)
 581		return rc;
 582
 583	rc = yellowfin_init_ring(dev);
 584	if (rc < 0)
 585		goto err_free_irq;
 586
 587	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 588	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
 589
 590	for (i = 0; i < 6; i++)
 591		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
 592
 593	/* Set up various condition 'select' registers.
 594	   There are no options here. */
 595	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
 596	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
 597	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
 598	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
 599	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
 600	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
 601
  602	/* Initialize other registers: with so many, this will eventually be
  603	   converted to an offset/value list. */
 604	iowrite32(dma_ctrl, ioaddr + DMACtrl);
 605	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
 606	/* Enable automatic generation of flow control frames, period 0xffff. */
 607	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
 608
 609	yp->tx_threshold = 32;
 610	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
 611
 612	if (dev->if_port == 0)
 613		dev->if_port = yp->default_port;
 614
 615	netif_start_queue(dev);
 616
 617	/* Setting the Rx mode will start the Rx process. */
 618	if (yp->drv_flags & IsGigabit) {
 619		/* We are always in full-duplex mode with gigabit! */
 620		yp->full_duplex = 1;
 621		iowrite16(0x01CF, ioaddr + Cnfg);
 622	} else {
 623		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
 624		iowrite16(0x1018, ioaddr + FrameGap1);
 625		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 626	}
 627	set_rx_mode(dev);
 628
 629	/* Enable interrupts by setting the interrupt mask. */
 630	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
 631	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
 632	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
 633	iowrite32(0x80008000, ioaddr + TxCtrl);
 634
 635	if (yellowfin_debug > 2) {
 636		netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
 637	}
 638
 639	/* Set the timer to check for link beat. */
 640	timer_setup(&yp->timer, yellowfin_timer, 0);
 641	yp->timer.expires = jiffies + 3*HZ;
 642	add_timer(&yp->timer);
 643out:
 644	return rc;
 645
 646err_free_irq:
 647	free_irq(irq, dev);
 648	goto out;
 649}
 650
 651static void yellowfin_timer(struct timer_list *t)
 652{
 653	struct yellowfin_private *yp = from_timer(yp, t, timer);
 654	struct net_device *dev = pci_get_drvdata(yp->pci_dev);
 655	void __iomem *ioaddr = yp->base;
 656	int next_tick = 60*HZ;
 657
 658	if (yellowfin_debug > 3) {
 659		netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
 660			      ioread16(ioaddr + IntrStatus));
 661	}
 662
 663	if (yp->mii_cnt) {
 664		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
 665		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
 666		int negotiated = lpa & yp->advertising;
 667		if (yellowfin_debug > 1)
 668			netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
 669				      yp->phys[0], bmsr, lpa);
 670
 671		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
 672
 673		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
 674
 675		if (bmsr & BMSR_LSTATUS)
 676			next_tick = 60*HZ;
 677		else
 678			next_tick = 3*HZ;
 679	}
 680
 681	yp->timer.expires = jiffies + next_tick;
 682	add_timer(&yp->timer);
 683}
 684
 685static void yellowfin_tx_timeout(struct net_device *dev, unsigned int txqueue)
 686{
 687	struct yellowfin_private *yp = netdev_priv(dev);
 688	void __iomem *ioaddr = yp->base;
 689
 690	netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
 691		    yp->cur_tx, yp->dirty_tx,
 692		    ioread32(ioaddr + TxStatus),
 693		    ioread32(ioaddr + RxStatus));
 694
 695	/* Note: these should be KERN_DEBUG. */
 696	if (yellowfin_debug) {
 697		int i;
 698		pr_warn("  Rx ring %p: ", yp->rx_ring);
 699		for (i = 0; i < RX_RING_SIZE; i++)
 700			pr_cont(" %08x", yp->rx_ring[i].result_status);
 701		pr_cont("\n");
 702		pr_warn("  Tx ring %p: ", yp->tx_ring);
 703		for (i = 0; i < TX_RING_SIZE; i++)
 704			pr_cont(" %04x /%08x",
 705			       yp->tx_status[i].tx_errs,
 706			       yp->tx_ring[i].result_status);
 707		pr_cont("\n");
 708	}
 709
 710	/* If the hardware is found to hang regularly, we will update the code
 711	   to reinitialize the chip here. */
 712	dev->if_port = 0;
 713
 714	/* Wake the potentially-idle transmit channel. */
 715	iowrite32(0x10001000, yp->base + TxCtrl);
 716	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 717		netif_wake_queue (dev);		/* Typical path */
 718
 719	netif_trans_update(dev); /* prevent tx timeout */
 720	dev->stats.tx_errors++;
 721}
 722
 723/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 724static int yellowfin_init_ring(struct net_device *dev)
 725{
 726	struct yellowfin_private *yp = netdev_priv(dev);
 727	int i, j;
 728
 729	yp->tx_full = 0;
 730	yp->cur_rx = yp->cur_tx = 0;
 731	yp->dirty_tx = 0;
 732
 733	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 734
 735	for (i = 0; i < RX_RING_SIZE; i++) {
 736		yp->rx_ring[i].dbdma_cmd =
 737			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
 738		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
 739			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
 740	}
 741
 742	for (i = 0; i < RX_RING_SIZE; i++) {
 743		struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
 744		yp->rx_skbuff[i] = skb;
 745		if (skb == NULL)
 746			break;
 747		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 748		yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 749								 skb->data,
 750								 yp->rx_buf_sz,
 751								 DMA_FROM_DEVICE));
 752	}
 753	if (i != RX_RING_SIZE) {
 754		for (j = 0; j < i; j++)
 755			dev_kfree_skb(yp->rx_skbuff[j]);
 756		return -ENOMEM;
 757	}
 758	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 759	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 760
 761#define NO_TXSTATS
 762#ifdef NO_TXSTATS
 763	/* In this mode the Tx ring needs only a single descriptor. */
 764	for (i = 0; i < TX_RING_SIZE; i++) {
 765		yp->tx_skbuff[i] = NULL;
 766		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
 767		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 768			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
 769	}
 770	/* Wrap ring */
 771	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 772#else
 773{
 774	/* Tx ring needs a pair of descriptors, the second for the status. */
 775	for (i = 0; i < TX_RING_SIZE; i++) {
 776		j = 2*i;
  777		yp->tx_skbuff[i] = NULL;
 778		/* Branch on Tx error. */
 779		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
 780		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 781			(j+1)*sizeof(struct yellowfin_desc));
 782		j++;
  783		if (yp->drv_flags & FullTxStatus) {
 784			yp->tx_ring[j].dbdma_cmd =
 785				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
 786			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
 787			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 788				i*sizeof(struct tx_status_words));
 789		} else {
 790			/* Symbios chips write only tx_errs word. */
 791			yp->tx_ring[j].dbdma_cmd =
 792				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
 793			yp->tx_ring[j].request_cnt = 2;
 794			/* Om pade ummmmm... */
 795			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
 796				i*sizeof(struct tx_status_words) +
 797				&(yp->tx_status[0].tx_errs) -
 798				&(yp->tx_status[0]));
 799		}
 800		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
 801			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
 802	}
 803	/* Wrap ring */
 804	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
 805}
 806#endif
 807	yp->tx_tail_desc = &yp->tx_status[0];
 808	return 0;
 809}
 810
 811static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 812					struct net_device *dev)
 813{
 814	struct yellowfin_private *yp = netdev_priv(dev);
 815	unsigned entry;
 816	int len = skb->len;
 817
 818	netif_stop_queue (dev);
 819
 820	/* Note: Ordering is important here, set the field with the
 821	   "ownership" bit last, and only then increment cur_tx. */
 822
 823	/* Calculate the next Tx descriptor entry. */
 824	entry = yp->cur_tx % TX_RING_SIZE;
 825
 826	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
 827		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
 828		/* Fix GX chipset errata. */
 829		if (cacheline_end > 24  || cacheline_end == 0) {
 830			len = skb->len + 32 - cacheline_end + 1;
 831			if (skb_padto(skb, len)) {
 832				yp->tx_skbuff[entry] = NULL;
 833				netif_wake_queue(dev);
 834				return NETDEV_TX_OK;
 835			}
 836		}
 837	}
 838	yp->tx_skbuff[entry] = skb;
 839
 840#ifdef NO_TXSTATS
 841	yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 842							     skb->data,
 843							     len, DMA_TO_DEVICE));
 844	yp->tx_ring[entry].result_status = 0;
 845	if (entry >= TX_RING_SIZE-1) {
 846		/* New stop command. */
 847		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
 848		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
 849			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
 850	} else {
 851		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 852		yp->tx_ring[entry].dbdma_cmd =
 853			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
 854	}
 855	yp->cur_tx++;
 856#else
 857	yp->tx_ring[entry<<1].request_cnt = len;
 858	yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
 859								skb->data,
 860								len, DMA_TO_DEVICE));
 861	/* The input_last (status-write) command is constant, but we must
 862	   rewrite the subsequent 'stop' command. */
 863
 864	yp->cur_tx++;
 865	{
 866		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
 867		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 868	}
 869	/* Final step -- overwrite the old 'stop' command. */
 870
 871	yp->tx_ring[entry<<1].dbdma_cmd =
 872		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
 873					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
 874#endif
 875
 876	/* Non-x86 Todo: explicitly flush cache lines here. */
 877
 878	/* Wake the potentially-idle transmit channel. */
 879	iowrite32(0x10001000, yp->base + TxCtrl);
 880
 881	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
 882		netif_start_queue (dev);		/* Typical path */
 883	else
 884		yp->tx_full = 1;
 885
 886	if (yellowfin_debug > 4) {
 887		netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
 888			      yp->cur_tx, entry);
 889	}
 890	return NETDEV_TX_OK;
 891}
 892
 893/* The interrupt handler does all of the Rx thread work and cleans up
 894   after the Tx thread. */
 895static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
 896{
 897	struct net_device *dev = dev_instance;
 898	struct yellowfin_private *yp;
 899	void __iomem *ioaddr;
 900	int boguscnt = max_interrupt_work;
 901	unsigned int handled = 0;
 902
 903	yp = netdev_priv(dev);
 904	ioaddr = yp->base;
 905
 906	spin_lock (&yp->lock);
 907
 908	do {
 909		u16 intr_status = ioread16(ioaddr + IntrClear);
 910
 911		if (yellowfin_debug > 4)
 912			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
 913				      intr_status);
 914
 915		if (intr_status == 0)
 916			break;
 917		handled = 1;
 918
 919		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
 920			yellowfin_rx(dev);
 921			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
 922		}
 923
 924#ifdef NO_TXSTATS
 925		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
 926			int entry = yp->dirty_tx % TX_RING_SIZE;
 927			struct sk_buff *skb;
 928
 929			if (yp->tx_ring[entry].result_status == 0)
 930				break;
 931			skb = yp->tx_skbuff[entry];
 932			dev->stats.tx_packets++;
 933			dev->stats.tx_bytes += skb->len;
 934			/* Free the original skb. */
 935			dma_unmap_single(&yp->pci_dev->dev,
 936					 le32_to_cpu(yp->tx_ring[entry].addr),
 937					 skb->len, DMA_TO_DEVICE);
 938			dev_consume_skb_irq(skb);
 939			yp->tx_skbuff[entry] = NULL;
 940		}
 941		if (yp->tx_full &&
 942		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
 943			/* The ring is no longer full, clear tbusy. */
 944			yp->tx_full = 0;
 945			netif_wake_queue(dev);
 946		}
 947#else
 948		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
 949			unsigned dirty_tx = yp->dirty_tx;
 950
 951			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
 952				 dirty_tx++) {
 953				/* Todo: optimize this. */
 954				int entry = dirty_tx % TX_RING_SIZE;
 955				u16 tx_errs = yp->tx_status[entry].tx_errs;
 956				struct sk_buff *skb;
 957
 958#ifndef final_version
 959				if (yellowfin_debug > 5)
 960					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
 961						      entry,
 962						      yp->tx_status[entry].tx_cnt,
 963						      yp->tx_status[entry].tx_errs,
 964						      yp->tx_status[entry].total_tx_cnt,
 965						      yp->tx_status[entry].paused);
 966#endif
 967				if (tx_errs == 0)
 968					break;	/* It still hasn't been Txed */
 969				skb = yp->tx_skbuff[entry];
 970				if (tx_errs & 0xF810) {
  971					/* There was a major error; log it. */
 972#ifndef final_version
 973					if (yellowfin_debug > 1)
 974						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
 975							      tx_errs);
 976#endif
 977					dev->stats.tx_errors++;
 978					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
 979					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
 980					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
 981					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
 982				} else {
 983#ifndef final_version
 984					if (yellowfin_debug > 4)
 985						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
 986							      tx_errs);
 987#endif
 988					dev->stats.tx_bytes += skb->len;
 989					dev->stats.collisions += tx_errs & 15;
 990					dev->stats.tx_packets++;
 991				}
 992				/* Free the original skb. */
 993				dma_unmap_single(&yp->pci_dev->dev,
  994						 le32_to_cpu(yp->tx_ring[entry << 1].addr),
 995						 skb->len, DMA_TO_DEVICE);
 996				dev_consume_skb_irq(skb);
  997				yp->tx_skbuff[entry] = NULL;
 998				/* Mark status as empty. */
 999				yp->tx_status[entry].tx_errs = 0;
1000			}
1001
1002#ifndef final_version
1003			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1004				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1005					   dirty_tx, yp->cur_tx, yp->tx_full);
1006				dirty_tx += TX_RING_SIZE;
1007			}
1008#endif
1009
1010			if (yp->tx_full &&
1011			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1012				/* The ring is no longer full, clear tbusy. */
1013				yp->tx_full = 0;
1014				netif_wake_queue(dev);
1015			}
1016
1017			yp->dirty_tx = dirty_tx;
1018			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1019		}
1020#endif
1021
1022		/* Log errors and other uncommon events. */
1023		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1024			yellowfin_error(dev, intr_status);
1025
1026		if (--boguscnt < 0) {
1027			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
1028				    intr_status);
1029			break;
1030		}
1031	} while (1);
1032
1033	if (yellowfin_debug > 3)
1034		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
1035			      ioread16(ioaddr + IntrStatus));
1036
1037	spin_unlock (&yp->lock);
1038	return IRQ_RETVAL(handled);
1039}
1040
1041/* This routine is logically part of the interrupt handler, but separated
1042   for clarity and better register allocation. */
1043static int yellowfin_rx(struct net_device *dev)
1044{
1045	struct yellowfin_private *yp = netdev_priv(dev);
1046	int entry = yp->cur_rx % RX_RING_SIZE;
1047	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1048
1049	if (yellowfin_debug > 4) {
1050		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
1051			   entry, yp->rx_ring[entry].result_status);
1052		printk(KERN_DEBUG "   #%d desc. %08x %08x %08x\n",
1053			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1054			   yp->rx_ring[entry].result_status);
1055	}
1056
1057	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1058	while (1) {
1059		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1060		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1061		s16 frame_status;
1062		u16 desc_status;
1063		int data_size, yf_size;
1064		u8 *buf_addr;
1065
 1066		if (!desc->result_status)
1067			break;
1068		dma_sync_single_for_cpu(&yp->pci_dev->dev,
1069					le32_to_cpu(desc->addr),
1070					yp->rx_buf_sz, DMA_FROM_DEVICE);
1071		desc_status = le32_to_cpu(desc->result_status) >> 16;
1072		buf_addr = rx_skb->data;
1073		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1074			le32_to_cpu(desc->result_status)) & 0xffff;
1075		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1076		if (yellowfin_debug > 4)
1077			printk(KERN_DEBUG "  %s() status was %04x\n",
1078			       __func__, frame_status);
1079		if (--boguscnt < 0)
1080			break;
1081
1082		yf_size = sizeof(struct yellowfin_desc);
1083
 1084		if (!(desc_status & RX_EOP)) {
1085			if (data_size != 0)
1086				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
1087					    desc_status, data_size);
1088			dev->stats.rx_length_errors++;
1089		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
 1090			/* There was an error. */
1091			if (yellowfin_debug > 3)
1092				printk(KERN_DEBUG "  %s() Rx error was %04x\n",
1093				       __func__, frame_status);
1094			dev->stats.rx_errors++;
1095			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1096			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1097			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1098			if (frame_status < 0) dev->stats.rx_dropped++;
1099		} else if ( !(yp->drv_flags & IsGigabit)  &&
1100				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1101			u8 status1 = buf_addr[data_size-2];
1102			u8 status2 = buf_addr[data_size-1];
1103			dev->stats.rx_errors++;
1104			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1105			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1106			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1107			if (status2 & 0x80) dev->stats.rx_dropped++;
1108#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
 1109		} else if ((yp->drv_flags & HasMACAddrBug)  &&
1110			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1111						      entry * yf_size),
1112					  dev->dev_addr) &&
1113			!ether_addr_equal(le32_to_cpu(yp->rx_ring_dma +
1114						      entry * yf_size),
1115					  "\377\377\377\377\377\377")) {
1116			if (bogus_rx++ == 0)
1117				netdev_warn(dev, "Bad frame to %pM\n",
1118					    buf_addr);
1119#endif
1120		} else {
1121			struct sk_buff *skb;
1122			int pkt_len = data_size -
1123				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1124			/* To verify: Yellowfin Length should omit the CRC! */
1125
1126#ifndef final_version
1127			if (yellowfin_debug > 4)
1128				printk(KERN_DEBUG "  %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
1129				       __func__, pkt_len, data_size, boguscnt);
1130#endif
1131			/* Check if the packet is long enough to just pass up the skbuff
1132			   without copying to a properly sized skbuff. */
1133			if (pkt_len > rx_copybreak) {
1134				skb_put(skb = rx_skb, pkt_len);
1135				dma_unmap_single(&yp->pci_dev->dev,
1136						 le32_to_cpu(yp->rx_ring[entry].addr),
1137						 yp->rx_buf_sz,
1138						 DMA_FROM_DEVICE);
1139				yp->rx_skbuff[entry] = NULL;
1140			} else {
1141				skb = netdev_alloc_skb(dev, pkt_len + 2);
1142				if (skb == NULL)
1143					break;
1144				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1145				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1146				skb_put(skb, pkt_len);
1147				dma_sync_single_for_device(&yp->pci_dev->dev,
1148							   le32_to_cpu(desc->addr),
1149							   yp->rx_buf_sz,
1150							   DMA_FROM_DEVICE);
1151			}
1152			skb->protocol = eth_type_trans(skb, dev);
1153			netif_rx(skb);
1154			dev->stats.rx_packets++;
1155			dev->stats.rx_bytes += pkt_len;
1156		}
1157		entry = (++yp->cur_rx) % RX_RING_SIZE;
1158	}
1159
1160	/* Refill the Rx ring buffers. */
1161	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1162		entry = yp->dirty_rx % RX_RING_SIZE;
1163		if (yp->rx_skbuff[entry] == NULL) {
1164			struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2);
1165			if (skb == NULL)
1166				break;				/* Better luck next round. */
1167			yp->rx_skbuff[entry] = skb;
1168			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1169			yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
1170									     skb->data,
1171									     yp->rx_buf_sz,
1172									     DMA_FROM_DEVICE));
1173		}
1174		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1175		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
1176		if (entry != 0)
1177			yp->rx_ring[entry - 1].dbdma_cmd =
1178				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1179		else
1180			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1181				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1182							| yp->rx_buf_sz);
1183	}
1184
1185	return 0;
1186}
1187
1188static void yellowfin_error(struct net_device *dev, int intr_status)
1189{
1190	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
1191	/* Hmmmmm, it's not clear what to do here. */
1192	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1193		dev->stats.tx_errors++;
1194	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1195		dev->stats.rx_errors++;
1196}
1197
1198static int yellowfin_close(struct net_device *dev)
1199{
1200	struct yellowfin_private *yp = netdev_priv(dev);
1201	void __iomem *ioaddr = yp->base;
1202	int i;
1203
1204	netif_stop_queue (dev);
1205
1206	if (yellowfin_debug > 1) {
1207		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
1208			      ioread16(ioaddr + TxStatus),
1209			      ioread16(ioaddr + RxStatus),
1210			      ioread16(ioaddr + IntrStatus));
1211		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d,  Rx %d / %d\n",
1212			      yp->cur_tx, yp->dirty_tx,
1213			      yp->cur_rx, yp->dirty_rx);
1214	}
1215
1216	/* Disable interrupts by clearing the interrupt mask. */
1217	iowrite16(0x0000, ioaddr + IntrEnb);
1218
1219	/* Stop the chip's Tx and Rx processes. */
1220	iowrite32(0x80000000, ioaddr + RxCtrl);
1221	iowrite32(0x80000000, ioaddr + TxCtrl);
1222
1223	del_timer(&yp->timer);
1224
1225#if defined(__i386__)
1226	if (yellowfin_debug > 2) {
1227		printk(KERN_DEBUG "  Tx ring at %08llx:\n",
1228				(unsigned long long)yp->tx_ring_dma);
1229		for (i = 0; i < TX_RING_SIZE*2; i++)
1230			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
1231				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1232				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1233				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1234		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1235		for (i = 0; i < TX_RING_SIZE; i++)
1236			printk(KERN_DEBUG "   #%d status %04x %04x %04x %04x\n",
1237				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1238				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1239
1240		printk(KERN_DEBUG "  Rx ring %08llx:\n",
1241				(unsigned long long)yp->rx_ring_dma);
1242		for (i = 0; i < RX_RING_SIZE; i++) {
1243			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
1244				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1245				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1246				   yp->rx_ring[i].result_status);
1247			if (yellowfin_debug > 6) {
1248				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1249					int j;
1250
1251					printk(KERN_DEBUG);
1252					for (j = 0; j < 0x50; j++)
1253						pr_cont(" %04x",
1254							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1255					pr_cont("\n");
1256				}
1257			}
1258		}
1259	}
1260#endif /* __i386__ debugging only */
1261
1262	free_irq(yp->pci_dev->irq, dev);
1263
1264	/* Free all the skbuffs in the Rx queue. */
1265	for (i = 0; i < RX_RING_SIZE; i++) {
1266		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1267		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1268		if (yp->rx_skbuff[i]) {
1269			dev_kfree_skb(yp->rx_skbuff[i]);
1270		}
1271		yp->rx_skbuff[i] = NULL;
1272	}
1273	for (i = 0; i < TX_RING_SIZE; i++) {
 1274		dev_kfree_skb(yp->tx_skbuff[i]);
 1275		yp->tx_skbuff[i] = NULL;
1276	}
1277
1278#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1279	if (yellowfin_debug > 0) {
1280		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
1281			      bogus_rx);
1282	}
1283#endif
1284
1285	return 0;
1286}
1287
1288/* Set or clear the multicast filter for this adaptor. */
1289
1290static void set_rx_mode(struct net_device *dev)
1291{
1292	struct yellowfin_private *yp = netdev_priv(dev);
1293	void __iomem *ioaddr = yp->base;
1294	u16 cfg_value = ioread16(ioaddr + Cnfg);
1295
1296	/* Stop the Rx process to change any value. */
1297	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1298	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1299		iowrite16(0x000F, ioaddr + AddrMode);
1300	} else if ((netdev_mc_count(dev) > 64) ||
1301		   (dev->flags & IFF_ALLMULTI)) {
1302		/* Too many to filter well, or accept all multicasts. */
1303		iowrite16(0x000B, ioaddr + AddrMode);
1304	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1305		struct netdev_hw_addr *ha;
1306		u16 hash_table[4];
1307		int i;
1308
1309		memset(hash_table, 0, sizeof(hash_table));
1310		netdev_for_each_mc_addr(ha, dev) {
1311			unsigned int bit;
1312
1313			/* Due to a bug in the early chip versions, multiple filter
1314			   slots must be set for each address. */
1315			if (yp->drv_flags & HasMulticastBug) {
1316				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1317				hash_table[bit >> 4] |= (1 << bit);
1318				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1319				hash_table[bit >> 4] |= (1 << bit);
1320				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1321				hash_table[bit >> 4] |= (1 << bit);
1322			}
1323			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1324			hash_table[bit >> 4] |= (1 << bit);
1325		}
1326		/* Copy the hash table to the chip. */
1327		for (i = 0; i < 4; i++)
1328			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1329		iowrite16(0x0003, ioaddr + AddrMode);
1330	} else {					/* Normal, unicast/broadcast-only mode. */
1331		iowrite16(0x0001, ioaddr + AddrMode);
1332	}
1333	/* Restart the Rx process. */
1334	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1335}
1336
1337static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1338{
1339	struct yellowfin_private *np = netdev_priv(dev);
1340
1341	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1342	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1343	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1344}
1345
1346static const struct ethtool_ops ethtool_ops = {
1347	.get_drvinfo = yellowfin_get_drvinfo
1348};
1349
1350static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1351{
1352	struct yellowfin_private *np = netdev_priv(dev);
1353	void __iomem *ioaddr = np->base;
1354	struct mii_ioctl_data *data = if_mii(rq);
1355
1356	switch(cmd) {
1357	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1358		data->phy_id = np->phys[0] & 0x1f;
1359		fallthrough;
1360
1361	case SIOCGMIIREG:		/* Read MII PHY register. */
1362		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1363		return 0;
1364
1365	case SIOCSMIIREG:		/* Write MII PHY register. */
1366		if (data->phy_id == np->phys[0]) {
1367			u16 value = data->val_in;
1368			switch (data->reg_num) {
1369			case 0:
1370				/* Check for autonegotiation on or reset. */
1371				np->medialock = (value & 0x9000) ? 0 : 1;
1372				if (np->medialock)
1373					np->full_duplex = (value & 0x0100) ? 1 : 0;
1374				break;
1375			case 4: np->advertising = value; break;
1376			}
1377			/* Perhaps check_duplex(dev), depending on chip semantics. */
1378		}
1379		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1380		return 0;
1381	default:
1382		return -EOPNOTSUPP;
1383	}
1384}
1385
1386
1387static void yellowfin_remove_one(struct pci_dev *pdev)
1388{
1389	struct net_device *dev = pci_get_drvdata(pdev);
1390	struct yellowfin_private *np;
1391
1392	BUG_ON(!dev);
1393	np = netdev_priv(dev);
1394
1395	dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
1396			  np->tx_status_dma);
1397	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
1398			  np->rx_ring_dma);
1399	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
1400			  np->tx_ring_dma);
1401	unregister_netdev (dev);
1402
1403	pci_iounmap(pdev, np->base);
1404
1405	pci_release_regions (pdev);
1406
1407	free_netdev (dev);
1408}
1409
1410
1411static struct pci_driver yellowfin_driver = {
1412	.name		= DRV_NAME,
1413	.id_table	= yellowfin_pci_tbl,
1414	.probe		= yellowfin_init_one,
1415	.remove		= yellowfin_remove_one,
1416};
1417
1418
1419static int __init yellowfin_init (void)
1420{
1421/* when a module, this is printed whether or not devices are found in probe */
1422#ifdef MODULE
1423	printk(version);
1424#endif
1425	return pci_register_driver(&yellowfin_driver);
1426}
1427
1428
1429static void __exit yellowfin_cleanup (void)
1430{
1431	pci_unregister_driver (&yellowfin_driver);
1432}
1433
1434
1435module_init(yellowfin_init);
1436module_exit(yellowfin_cleanup);