Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3	Written/copyright 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Information and updates available at
  21	http://www.scyld.com/network/epic100.html
  22	[this link no longer provides anything useful -jgarzik]
  23
  24	---------------------------------------------------------------------
  25
  26*/
  27
/* Driver identification strings, reported at probe and module load. */
#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
/* Total byte sizes of the DMA-coherent descriptor rings. */
#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <linux/uaccess.h>
  90#include <asm/byteorder.h>
  91
/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

/* Module parameters; all may be given on the modprobe command line.
   options[]/full_duplex[] are per-card, indexed by probe order. */
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112				Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
/* Per-chip capability flags, stored in epic_chip_info.drv_flags. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100	/* Size of the chip register window. */
#define USE_IO_OPS 1

/* BAR 0 is the I/O-port window, BAR 1 the memory-mapped window. */
#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


/* PCI vendor/device/subsystem match table; driver_data is the chip_t
   index into pci_id_tbl[]. */
static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 184
/* Register accessor shorthands; a local 'ioaddr' must be in scope at
   every call site. */
#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
/* Bits written to the COMMAND register. */
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Interrupt sources handled in the NAPI poll loop, vs. the remainder
   handled directly in the hard interrupt. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* Maps the low 4 bits of dev->if_port to a forced MII BMCR value;
   0 means leave the transceiver autonegotiating. */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
 225
/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

/* Hardware Tx descriptor: status/ownership word, DMA buffer address,
   length-plus-control word, and bus address of the next descriptor. */
struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

/* Hardware Rx descriptor; same field layout as the Tx descriptor. */
struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,		/* Descriptor belongs to the chip, not the CPU. */
};
 250
#define PRIV_ALIGN	15 	/* Required alignment mask */
/* Per-device driver state, allocated as the net_device private area. */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	/* Bus (DMA) addresses of the two descriptor rings. */
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;					/* Interrupt sources we unmask. */
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;				/* Mapped register window. */
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
 288
/* Forward declarations for the net_device_ops and internal helpers. */
static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
 319
/* PCI probe: enable the device, map its register window, allocate the
 * net_device and DMA descriptor rings, read the MAC address, scan for
 * MII transceivers and register the interface.
 * Returns 0 on success or a negative errno; every partially-acquired
 * resource is released through the goto unwind chain at the bottom. */
static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* Probe counter, indexes options[]/full_duplex[]. */
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;	/* chip_t index into pci_id_tbl[]. */
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	/* Sanity-check that BAR 0 covers the whole register window. */
	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	/* All failures from here to register_netdev() are allocation or
	   mapping failures. */
	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* DMA-coherent Tx and Rx descriptor rings. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: a boot-time dev->mem_start value takes
	   precedence over the per-card module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	/* The station address is read from the LAN0..LAN2 registers. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

/* Error unwind: release resources in reverse order of acquisition. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
 512
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
 535
 536static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 537{
 538	void __iomem *ioaddr = ep->ioaddr;
 539
 540	ew32(INTMASK, 0x00000000);
 541}
 542
/* Flush a posted MMIO write by reading INTMASK back.  With port I/O
   (USE_IO_OPS defined, which it currently always is at the top of this
   file) writes are not posted, so this compiles to nothing. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
 549
/* Mask the NAPI-handled interrupt sources and flush the write, so no
   further NAPI events can be raised once this returns. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}
 558
 559static inline void epic_napi_irq_on(struct net_device *dev,
 560				    struct epic_private *ep)
 561{
 562	void __iomem *ioaddr = ep->ioaddr;
 563
 564	/* No need to commit possible posted write */
 565	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
 566}
 567
/* Bit-bang one 16-bit word out of the serial EEPROM at 'location'.
 * EECTL bit 0x40 distinguishes small (64-word) from large (256-word)
 * parts, which take different read commands.  Returns the word read. */
static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip-select to begin a fresh transaction. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();		/* flush to the bus between clock edges */
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock in the 16 data bits, most significant first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
 601
/* MII management operation codes for the MIICtrl register. */
#define MII_READOP		1
#define MII_WRITEOP		2

/* Read a 16-bit MII register.  The controller clears MII_READOP in
 * MIICtrl when the transaction finishes; returns 0xffff on timeout. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			/* Low registers of PHY 1 sometimes read as 0xffff;
			   re-issue the command and poll again. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}
 627
 628static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 629{
 630	struct epic_private *ep = netdev_priv(dev);
 631	void __iomem *ioaddr = ep->ioaddr;
 632	int i;
 633
 634	ew16(MIIData, value);
 635	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
 636	for (i = 10000; i > 0; i--) {
 637		barrier();
 638		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
 639			break;
 640	}
 641}
 642
 643
/* Bring the interface up: reset the chip, hook the (shared) IRQ, build
 * the descriptor rings, program the chip registers, start the receiver
 * and transmit queue, and arm the media-watch timer.
 * Returns 0 on success or the error from request_irq(). */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Load the station address into the LAN0..LAN2 registers. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	/* A non-zero media2miictl[] entry forces the transceiver mode;
	   otherwise read the link partner and let NWay settle. */
	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return rc;
}
 752
/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
/* Quiesce the device: stop the queue, mask interrupts, halt Rx/Tx DMA,
   fold the hardware error counters into dev->stats, and drain any
   already-received packets from the Rx ring. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	/* An all-ones COMMAND readback means the chip has failed or been
	   removed (CardBus); skip the counter reads in that case. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
 778
/* Re-initialize the chip after a fault (Tx timeout or PCI bus error).
 * Repeats the register setup done in epic_open(), but points the DMA
 * engines at the current ring offsets instead of the ring base, so the
 * descriptor rings and outstanding buffers survive the restart. */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Descriptor byteswap setting must match the one in epic_open(). */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Restore the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Resume DMA at the next descriptor to service, not the ring base. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
 828
 829static void check_media(struct net_device *dev)
 830{
 831	struct epic_private *ep = netdev_priv(dev);
 832	void __iomem *ioaddr = ep->ioaddr;
 833	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
 834	int negotiated = mii_lpa & ep->mii.advertising;
 835	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 836
 837	if (ep->mii.force_media)
 838		return;
 839	if (mii_lpa == 0xffff)		/* Bogus read */
 840		return;
 841	if (ep->mii.full_duplex != duplex) {
 842		ep->mii.full_duplex = duplex;
 843		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
 844			    ep->mii.full_duplex ? "full" : "half",
 845			    ep->phys[0], mii_lpa);
 846		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 847	}
 848}
 849
 850static void epic_timer(unsigned long data)
 851{
 852	struct net_device *dev = (struct net_device *)data;
 853	struct epic_private *ep = netdev_priv(dev);
 854	void __iomem *ioaddr = ep->ioaddr;
 855	int next_tick = 5*HZ;
 856
 857	if (debug > 3) {
 858		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
 859			   er32(TxSTAT));
 860		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
 861			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 862	}
 863
 864	check_media(dev);
 865
 866	ep->timer.expires = jiffies + next_tick;
 867	add_timer(&ep->timer);
 868}
 869
/* Watchdog hook: the stack saw no Tx progress within TX_TIMEOUT.  A Tx
 * FIFO underflow only needs a RestartTx nudge; any other cause gets a
 * full chip restart followed by re-queueing the pending descriptors. */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
 896
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	/* Rx buffers must hold a full frame; enlarge them for big MTUs. */
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		/* Hand the descriptor to the chip only after mapping. */
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	/* Goes negative (as unsigned) if any allocation failed, recording
	   how many buffers the refill logic still owes the ring. */
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	/* Wrap the Tx ring as well. */
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
 942
/* Queue one skb for transmission: pad short frames to ETH_ZLEN, fill in
 * the next Tx descriptor, and hand it to the chip by writing DescOwn
 * last.  A Tx-done interrupt is requested only periodically to limit
 * interrupt load.  Always returns NETDEV_TX_OK (the skb is consumed). */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	/* NOTE: despite the name, this is the number of in-flight entries
	   (cur_tx - dirty_tx), not the number of free slots. */
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	/* Request a Tx-done interrupt at the queue midpoint and when the
	   ring is about to fill; otherwise transmit silently. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* This store transfers ownership to the chip; it must come after
	   every other descriptor field has been written. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
 995
/* Account one failed transmit: bump tx_errors and decode the chip's
 * Tx status bits into the corresponding fine-grained error counters.
 * Called from epic_tx() for descriptors completed without bit 0 set.
 */
static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was an major error, log it. */
	if (debug > 1)
		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
			   status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}
1017
/* Reclaim completed Tx descriptors: update statistics, unmap and free
 * the skbs, and re-wake the transmit queue once enough slots have
 * drained.  Runs from epic_poll(); stops at the first descriptor the
 * chip still owns (DescOwn set).
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Bit 0 of the completed status selects the success path;
		   other outcomes are decoded by epic_tx_error(). */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: dirty_tx may never lag cur_tx by more than the
	   ring size. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1064
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
/* Hard-IRQ handler: acknowledges the interrupt, defers Rx/Tx work to
 * NAPI, and handles the uncommon events (counter overflow, Tx FIFO
 * underrun, PCI bus errors) inline.
 */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Hand NAPI events to the poll routine.  If poll is already
	   running, bump reschedule_in_poll instead so epic_poll() loops
	   once more rather than completing. */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* EpicRemoved: register read came back all-ones, i.e. the
		   card is gone (e.g. CardBus eject) — nothing to do. */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the Tx threshold before restarting. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
1136
/* Receive up to @budget packets from the Rx ring, then refill it.
 *
 * Frames smaller than rx_copybreak are copied into a freshly allocated
 * skb so the (large) DMA buffer can stay in the ring; larger frames
 * are passed up directly and their ring slot is re-allocated in the
 * refill loop.  Returns the number of descriptors processed (receives
 * plus refills), which epic_poll() compares against the budget.
 */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		/* Error bits: 0x2000 = frame spanned multiple buffers,
		   0x0006 = frame errors (counted by hardware). */
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				/* Clamp to the maximum Ethernet payload. */
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring skb up directly; the slot is
				   re-filled in the loop below. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1231
1232static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1233{
1234	void __iomem *ioaddr = ep->ioaddr;
1235	int status;
1236
1237	status = er32(INTSTAT);
1238
1239	if (status == EpicRemoved)
1240		return;
1241	if (status & RxOverflow) 	/* Missed a Rx frame. */
1242		dev->stats.rx_errors++;
1243	if (status & (RxOverflow | RxFull))
1244		ew16(COMMAND, RxQueued);
1245}
1246
/* NAPI poll: reap finished Tx descriptors, receive up to @budget
 * packets and recover Rx error conditions.  If the IRQ handler set
 * reschedule_in_poll while we were running, loop again instead of
 * completing; otherwise complete NAPI, ack the NAPI event bits and
 * re-enable their interrupt sources.
 *
 * NOTE(review): uses __napi_complete(); newer kernels replaced this
 * pattern with napi_complete_done() — confirm against the target
 * kernel version before changing.
 */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	void __iomem *ioaddr = ep->ioaddr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__napi_complete(napi);
			ew32(INTSTAT, EpicNapiEvent);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1286
/* Take the interface down (ndo_stop): stop the queue and NAPI, mask
 * interrupts, free the IRQ, quiesce the chip, release all ring skbs
 * and DMA mappings, and put the chip into low-power mode.  The order
 * matters: traffic is stopped before buffers are freed.
 */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Release any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1338
1339static struct net_device_stats *epic_get_stats(struct net_device *dev)
1340{
1341	struct epic_private *ep = netdev_priv(dev);
1342	void __iomem *ioaddr = ep->ioaddr;
1343
1344	if (netif_running(dev)) {
1345		struct net_device_stats *stats = &dev->stats;
1346
1347		stats->rx_missed_errors	+= er8(MPCNT);
1348		stats->rx_frame_errors	+= er8(ALICNT);
1349		stats->rx_crc_errors	+= er8(CRCCNT);
1350	}
1351
1352	return &dev->stats;
1353}
1354
1355/* Set or clear the multicast filter for this adaptor.
1356   Note that we only use exclusion around actually queueing the
1357   new frame, not around filling ep->setup_frame.  This is non-deterministic
1358   when re-entered but still correct. */
1359
1360static void set_rx_mode(struct net_device *dev)
1361{
1362	struct epic_private *ep = netdev_priv(dev);
1363	void __iomem *ioaddr = ep->ioaddr;
1364	unsigned char mc_filter[8];		 /* Multicast hash filter */
1365	int i;
1366
1367	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1368		ew32(RxCtrl, 0x002c);
1369		/* Unconditionally log net taps. */
1370		memset(mc_filter, 0xff, sizeof(mc_filter));
1371	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1372		/* There is apparently a chip bug, so the multicast filter
1373		   is never enabled. */
1374		/* Too many to filter perfectly -- accept all multicasts. */
1375		memset(mc_filter, 0xff, sizeof(mc_filter));
1376		ew32(RxCtrl, 0x000c);
1377	} else if (netdev_mc_empty(dev)) {
1378		ew32(RxCtrl, 0x0004);
1379		return;
1380	} else {					/* Never executed, for now. */
1381		struct netdev_hw_addr *ha;
1382
1383		memset(mc_filter, 0, sizeof(mc_filter));
1384		netdev_for_each_mc_addr(ha, dev) {
1385			unsigned int bit_nr =
1386				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1387			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1388		}
1389	}
1390	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1391	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1392		for (i = 0; i < 4; i++)
1393			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1394		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1395	}
1396}
1397
1398static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1399{
1400	struct epic_private *np = netdev_priv(dev);
1401
1402	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1403	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1404	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1405}
1406
1407static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1408{
1409	struct epic_private *np = netdev_priv(dev);
1410	int rc;
1411
1412	spin_lock_irq(&np->lock);
1413	rc = mii_ethtool_gset(&np->mii, cmd);
1414	spin_unlock_irq(&np->lock);
1415
1416	return rc;
1417}
1418
1419static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1420{
1421	struct epic_private *np = netdev_priv(dev);
1422	int rc;
1423
1424	spin_lock_irq(&np->lock);
1425	rc = mii_ethtool_sset(&np->mii, cmd);
1426	spin_unlock_irq(&np->lock);
1427
1428	return rc;
1429}
1430
1431static int netdev_nway_reset(struct net_device *dev)
1432{
1433	struct epic_private *np = netdev_priv(dev);
1434	return mii_nway_restart(&np->mii);
1435}
1436
1437static u32 netdev_get_link(struct net_device *dev)
1438{
1439	struct epic_private *np = netdev_priv(dev);
1440	return mii_link_ok(&np->mii);
1441}
1442
/* ethtool get_msglevel: expose the module-wide debug level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1447
/* ethtool set_msglevel: set the module-wide debug level (affects all
   devices driven by this module, not just @dev). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1452
1453static int ethtool_begin(struct net_device *dev)
1454{
1455	struct epic_private *ep = netdev_priv(dev);
1456	void __iomem *ioaddr = ep->ioaddr;
1457
1458	/* power-up, if interface is down */
1459	if (!netif_running(dev)) {
1460		ew32(GENCTL, 0x0200);
1461		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1462	}
1463	return 0;
1464}
1465
1466static void ethtool_complete(struct net_device *dev)
1467{
1468	struct epic_private *ep = netdev_priv(dev);
1469	void __iomem *ioaddr = ep->ioaddr;
1470
1471	/* power-down, if interface is down */
1472	if (!netif_running(dev)) {
1473		ew32(GENCTL, 0x0008);
1474		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1475	}
1476}
1477
/* Legacy ethtool operations (get/set_settings predate the ksettings
   API).  begin/complete bracket every ethtool call so the chip is
   powered up even while the interface is down. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1489
1490static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1491{
1492	struct epic_private *np = netdev_priv(dev);
1493	void __iomem *ioaddr = np->ioaddr;
1494	struct mii_ioctl_data *data = if_mii(rq);
1495	int rc;
1496
1497	/* power-up, if interface is down */
1498	if (! netif_running(dev)) {
1499		ew32(GENCTL, 0x0200);
1500		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1501	}
1502
1503	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1504	spin_lock_irq(&np->lock);
1505	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1506	spin_unlock_irq(&np->lock);
1507
1508	/* power-down, if interface is down */
1509	if (! netif_running(dev)) {
1510		ew32(GENCTL, 0x0008);
1511		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1512	}
1513	return rc;
1514}
1515
1516
1517static void epic_remove_one(struct pci_dev *pdev)
1518{
1519	struct net_device *dev = pci_get_drvdata(pdev);
1520	struct epic_private *ep = netdev_priv(dev);
1521
1522	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1523	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1524	unregister_netdev(dev);
1525	pci_iounmap(pdev, ep->ioaddr);
1526	pci_release_regions(pdev);
1527	free_netdev(dev);
1528	pci_disable_device(pdev);
1529	/* pci_power_off(pdev, -1); */
1530}
1531
1532
1533#ifdef CONFIG_PM
1534
1535static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1536{
1537	struct net_device *dev = pci_get_drvdata(pdev);
1538	struct epic_private *ep = netdev_priv(dev);
1539	void __iomem *ioaddr = ep->ioaddr;
1540
1541	if (!netif_running(dev))
1542		return 0;
1543	epic_pause(dev);
1544	/* Put the chip into low-power mode. */
1545	ew32(GENCTL, 0x0008);
1546	/* pci_power_off(pdev, -1); */
1547	return 0;
1548}
1549
1550
/* PM resume: reprogram the chip for an interface that was running at
 * suspend time; a downed interface needs nothing until epic_open().
 */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1561
1562#endif /* CONFIG_PM */
1563
1564
/* PCI driver glue: probe/remove plus legacy suspend/resume hooks. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1575
1576
/* Module entry point: print the version banner (module build only)
   and register the PCI driver with the core. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}
1586
1587
1588static void __exit epic_cleanup (void)
1589{
1590	pci_unregister_driver (&epic_driver);
1591}
1592
1593
/* Register the module's entry and exit points with the kernel. */
module_init(epic_init);
module_exit(epic_cleanup);