/* [source-viewer page chrome removed: site navigation, "Loading...", kernel version banner v6.13.7] */
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3	Written/copyright 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Information and updates available at
  21	http://www.scyld.com/network/epic100.html
  22	[this link no longer provides anything useful -jgarzik]
  23
  24	---------------------------------------------------------------------
  25
  26*/
  27
  28#define DRV_NAME        "epic100"
  29#define DRV_VERSION     "2.1"
  30#define DRV_RELDATE     "Sept 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36
  37/* Used to pass the full-duplex flag, etc. */
  38#define MAX_UNITS 8		/* More are supported, limit only on options */
  39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  41
  42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  43   Setting to > 1518 effectively disables this feature. */
  44static int rx_copybreak;
  45
  46/* Operational parameters that are set at compile time. */
  47
  48/* Keep the ring sizes a power of two for operational efficiency.
  49   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  50   Making the Tx ring too large decreases the effectiveness of channel
  51   bonding and packet priority.
  52   There are no ill effects from too-large receive rings. */
  53#define TX_RING_SIZE	256
  54#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
  55#define RX_RING_SIZE	256
  56#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
  57#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
  58
  59/* Operational parameters that usually are not changed. */
  60/* Time in jiffies before concluding the transmitter is hung. */
  61#define TX_TIMEOUT  (2*HZ)
  62
  63#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  64
  65/* Bytes transferred to chip before transmission starts. */
  66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
  67#define TX_FIFO_THRESH 256
  68#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <linux/uaccess.h>
  90#include <asm/byteorder.h>
  91
  92/* These identify the driver base version and may not be removed. */
  93static char version[] =
  94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
  95static char version2[] =
  96"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";
  97
  98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
  99MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
 100MODULE_LICENSE("GPL");
 101
 102module_param(debug, int, 0);
 103module_param(rx_copybreak, int, 0);
 104module_param_array(options, int, NULL, 0);
 105module_param_array(full_duplex, int, NULL, 0);
 106MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
 107MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
 108MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
 109MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112				Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
 144enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
 145
 146#define EPIC_TOTAL_SIZE 0x100
 147#define USE_IO_OPS 1
 148
 149#ifdef USE_IO_OPS
 150#define EPIC_BAR	0
 151#else
 152#define EPIC_BAR	1
 153#endif
 154
 155typedef enum {
 156	SMSC_83C170_0,
 157	SMSC_83C170,
 158	SMSC_83C175,
 159} chip_t;
 160
 161
 162struct epic_chip_info {
 163	const char *name;
 164        int drv_flags;                          /* Driver use, intended as capability flags. */
 165};
 166
 167
 168/* indexed by chip_t */
 169static const struct epic_chip_info pci_id_tbl[] = {
 170	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
 171	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
 172	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
 173};
 174
 175
 176static const struct pci_device_id epic_pci_tbl[] = {
 177	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
 178	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
 179	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
 180	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
 181	{ 0,}
 182};
 183MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 184
 185#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
 186#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
 187#define er8(reg)	ioread8(ioaddr + (reg))
 188#define er16(reg)	ioread16(ioaddr + (reg))
 189#define er32(reg)	ioread32(ioaddr + (reg))
 190
 191/* Offsets to registers, using the (ugh) SMC names. */
 192enum epic_registers {
 193  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
 194  PCIBurstCnt=0x18,
 195  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
 196  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
 197  LAN0=64,						/* MAC address. */
 198  MC0=80,						/* Multicast filter table. */
 199  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
 200  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
 201};
 202
 203/* Interrupt register bits, using my own meaningful names. */
 204enum IntrStatus {
 205	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
 206	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
 207	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
 208	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
 209	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
 210};
 211enum CommandBits {
 212	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
 213	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
 214};
 215
 216#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */
 217
 218#define EpicNapiEvent	(TxEmpty | TxDone | \
 219			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
 220#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
 221
 222static const u16 media2miictl[16] = {
 223	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
 224	0, 0, 0, 0,  0, 0, 0, 0 };
 225
 226/*
 227 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 228 * really ARE host-endian; it's not a misannotation.  We tell
 229 * the card to byteswap them internally on big-endian hosts -
 230 * look for #ifdef __BIG_ENDIAN in epic_open().
 231 */
 232
 233struct epic_tx_desc {
 234	u32 txstatus;
 235	u32 bufaddr;
 236	u32 buflength;
 237	u32 next;
 238};
 239
 240struct epic_rx_desc {
 241	u32 rxstatus;
 242	u32 bufaddr;
 243	u32 buflength;
 244	u32 next;
 245};
 246
 247enum desc_status_bits {
 248	DescOwn=0x8000,
 249};
 250
 251#define PRIV_ALIGN	15 	/* Required alignment mask */
 252struct epic_private {
 253	struct epic_rx_desc *rx_ring;
 254	struct epic_tx_desc *tx_ring;
 255	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
 256	struct sk_buff* tx_skbuff[TX_RING_SIZE];
 257	/* The addresses of receive-in-place skbuffs. */
 258	struct sk_buff* rx_skbuff[RX_RING_SIZE];
 259
 260	dma_addr_t tx_ring_dma;
 261	dma_addr_t rx_ring_dma;
 262
 263	/* Ring pointers. */
 264	spinlock_t lock;				/* Group with Tx control cache line. */
 265	spinlock_t napi_lock;
 266	struct napi_struct napi;
 
 267	unsigned int cur_tx, dirty_tx;
 268
 269	unsigned int cur_rx, dirty_rx;
 270	u32 irq_mask;
 271	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
 272
 273	void __iomem *ioaddr;
 274	struct pci_dev *pci_dev;			/* PCI bus location. */
 275	int chip_id, chip_flags;
 276
 277	struct timer_list timer;			/* Media selection timer. */
 278	int tx_threshold;
 279	unsigned char mc_filter[8];
 280	signed char phys[4];				/* MII device addresses. */
 281	u16 advertising;					/* NWay media advertisement */
 282	int mii_phy_cnt;
 283	u32 ethtool_ops_nesting;
 284	struct mii_if_info mii;
 285	unsigned int tx_full:1;				/* The Tx queue is full. */
 286	unsigned int default_port:4;		/* Last dev->if_port value. */
 287};
 288
 289static int epic_open(struct net_device *dev);
 290static int read_eeprom(struct epic_private *, int);
 291static int mdio_read(struct net_device *dev, int phy_id, int location);
 292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
 293static void epic_restart(struct net_device *dev);
 294static void epic_timer(struct timer_list *t);
 295static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
 296static void epic_init_ring(struct net_device *dev);
 297static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
 298				   struct net_device *dev);
 299static int epic_rx(struct net_device *dev, int budget);
 300static int epic_poll(struct napi_struct *napi, int budget);
 301static irqreturn_t epic_interrupt(int irq, void *dev_instance);
 302static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 303static const struct ethtool_ops netdev_ethtool_ops;
 304static int epic_close(struct net_device *dev);
 305static struct net_device_stats *epic_get_stats(struct net_device *dev);
 306static void set_rx_mode(struct net_device *dev);
 307
 308static const struct net_device_ops epic_netdev_ops = {
 309	.ndo_open		= epic_open,
 310	.ndo_stop		= epic_close,
 311	.ndo_start_xmit		= epic_start_xmit,
 312	.ndo_tx_timeout 	= epic_tx_timeout,
 313	.ndo_get_stats		= epic_get_stats,
 314	.ndo_set_rx_mode	= set_rx_mode,
 315	.ndo_eth_ioctl		= netdev_ioctl,
 
 316	.ndo_set_mac_address 	= eth_mac_addr,
 317	.ndo_validate_addr	= eth_validate_addr,
 318};
 319
 320static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 321{
 322	static int card_idx = -1;
 323	void __iomem *ioaddr;
 324	int chip_idx = (int) ent->driver_data;
 
 325	struct net_device *dev;
 326	struct epic_private *ep;
 327	int i, ret, option = 0, duplex = 0;
 328	__le16 addr[ETH_ALEN / 2];
 329	void *ring_space;
 330	dma_addr_t ring_dma;
 331
 332/* when built into the kernel, we only print version if device is found */
 333#ifndef MODULE
 334	pr_info_once("%s%s\n", version, version2);
 
 
 335#endif
 336
 337	card_idx++;
 338
 339	ret = pci_enable_device(pdev);
 340	if (ret)
 341		goto out;
 
 342
 343	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
 344		dev_err(&pdev->dev, "no PCI region space\n");
 345		ret = -ENODEV;
 346		goto err_out_disable;
 347	}
 348
 349	pci_set_master(pdev);
 350
 351	ret = pci_request_regions(pdev, DRV_NAME);
 352	if (ret < 0)
 353		goto err_out_disable;
 354
 355	ret = -ENOMEM;
 356
 357	dev = alloc_etherdev(sizeof (*ep));
 358	if (!dev)
 359		goto err_out_free_res;
 360
 361	SET_NETDEV_DEV(dev, &pdev->dev);
 362
 363	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
 364	if (!ioaddr) {
 365		dev_err(&pdev->dev, "ioremap failed\n");
 366		goto err_out_free_netdev;
 367	}
 368
 369	pci_set_drvdata(pdev, dev);
 370	ep = netdev_priv(dev);
 371	ep->ioaddr = ioaddr;
 372	ep->mii.dev = dev;
 373	ep->mii.mdio_read = mdio_read;
 374	ep->mii.mdio_write = mdio_write;
 375	ep->mii.phy_id_mask = 0x1f;
 376	ep->mii.reg_num_mask = 0x1f;
 377
 378	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
 379					GFP_KERNEL);
 380	if (!ring_space)
 381		goto err_out_iounmap;
 382	ep->tx_ring = ring_space;
 383	ep->tx_ring_dma = ring_dma;
 384
 385	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
 386					GFP_KERNEL);
 387	if (!ring_space)
 388		goto err_out_unmap_tx;
 389	ep->rx_ring = ring_space;
 390	ep->rx_ring_dma = ring_dma;
 391
 392	if (dev->mem_start) {
 393		option = dev->mem_start;
 394		duplex = (dev->mem_start & 16) ? 1 : 0;
 395	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
 396		if (options[card_idx] >= 0)
 397			option = options[card_idx];
 398		if (full_duplex[card_idx] >= 0)
 399			duplex = full_duplex[card_idx];
 400	}
 401
 402	spin_lock_init(&ep->lock);
 403	spin_lock_init(&ep->napi_lock);
 
 404
 405	/* Bring the chip out of low-power mode. */
 406	ew32(GENCTL, 0x4200);
 407	/* Magic?!  If we don't set this bit the MII interface won't work. */
 408	/* This magic is documented in SMSC app note 7.15 */
 409	for (i = 16; i > 0; i--)
 410		ew32(TEST1, 0x0008);
 411
 412	/* Turn on the MII transceiver. */
 413	ew32(MIICfg, 0x12);
 414	if (chip_idx == 1)
 415		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 416	ew32(GENCTL, 0x0200);
 417
 418	/* Note: the '175 does not have a serial EEPROM. */
 419	for (i = 0; i < 3; i++)
 420		addr[i] = cpu_to_le16(er16(LAN0 + i*4));
 421	eth_hw_addr_set(dev, (u8 *)addr);
 422
 423	if (debug > 2) {
 424		dev_dbg(&pdev->dev, "EEPROM contents:\n");
 425		for (i = 0; i < 64; i++)
 426			pr_cont(" %4.4x%s", read_eeprom(ep, i),
 427				   i % 16 == 15 ? "\n" : "");
 428	}
 429
 430	ep->pci_dev = pdev;
 431	ep->chip_id = chip_idx;
 432	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
 433	ep->irq_mask =
 434		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
 435		 | CntFull | TxUnderrun | EpicNapiEvent;
 436
 437	/* Find the connected MII xcvrs.
 438	   Doing this in open() would allow detecting external xcvrs later, but
 439	   takes much time and no cards have external MII. */
 440	{
 441		int phy, phy_idx = 0;
 442		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
 443			int mii_status = mdio_read(dev, phy, MII_BMSR);
 444			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
 445				ep->phys[phy_idx++] = phy;
 446				dev_info(&pdev->dev,
 447					"MII transceiver #%d control "
 448					"%4.4x status %4.4x.\n",
 449					phy, mdio_read(dev, phy, 0), mii_status);
 450			}
 451		}
 452		ep->mii_phy_cnt = phy_idx;
 453		if (phy_idx != 0) {
 454			phy = ep->phys[0];
 455			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
 456			dev_info(&pdev->dev,
 457				"Autonegotiation advertising %4.4x link "
 458				   "partner %4.4x.\n",
 459				   ep->mii.advertising, mdio_read(dev, phy, 5));
 460		} else if ( ! (ep->chip_flags & NO_MII)) {
 461			dev_warn(&pdev->dev,
 462				"***WARNING***: No MII transceiver found!\n");
 463			/* Use the known PHY address of the EPII. */
 464			ep->phys[0] = 3;
 465		}
 466		ep->mii.phy_id = ep->phys[0];
 467	}
 468
 469	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
 470	if (ep->chip_flags & MII_PWRDWN)
 471		ew32(NVCTL, er32(NVCTL) & ~0x483c);
 472	ew32(GENCTL, 0x0008);
 473
 474	/* The lower four bits are the media type. */
 475	if (duplex) {
 476		ep->mii.force_media = ep->mii.full_duplex = 1;
 477		dev_info(&pdev->dev, "Forced full duplex requested.\n");
 478	}
 479	dev->if_port = ep->default_port = option;
 480
 481	/* The Epic-specific entries in the device structure. */
 482	dev->netdev_ops = &epic_netdev_ops;
 483	dev->ethtool_ops = &netdev_ethtool_ops;
 484	dev->watchdog_timeo = TX_TIMEOUT;
 485	netif_napi_add(dev, &ep->napi, epic_poll);
 486
 487	ret = register_netdev(dev);
 488	if (ret < 0)
 489		goto err_out_unmap_rx;
 490
 491	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
 492		    pci_id_tbl[chip_idx].name,
 493		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
 494		    dev->dev_addr);
 495
 496out:
 497	return ret;
 498
 499err_out_unmap_rx:
 500	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
 501			  ep->rx_ring_dma);
 502err_out_unmap_tx:
 503	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
 504			  ep->tx_ring_dma);
 505err_out_iounmap:
 506	pci_iounmap(pdev, ioaddr);
 507err_out_free_netdev:
 508	free_netdev(dev);
 509err_out_free_res:
 510	pci_release_regions(pdev);
 511err_out_disable:
 512	pci_disable_device(pdev);
 513	goto out;
 514}
 515
 516/* Serial EEPROM section. */
 517
 518/*  EEPROM_Ctrl bits. */
 519#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
 520#define EE_CS			0x02	/* EEPROM chip select. */
 521#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
 522#define EE_WRITE_0		0x01
 523#define EE_WRITE_1		0x09
 524#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
 525#define EE_ENB			(0x0001 | EE_CS)
 526
 527/* Delay between EEPROM clock transitions.
 528   This serves to flush the operation to the PCI bus.
 529 */
 530
 531#define eeprom_delay()	er32(EECTL)
 532
 533/* The EEPROM commands include the alway-set leading bit. */
 534#define EE_WRITE_CMD	(5 << 6)
 535#define EE_READ64_CMD	(6 << 6)
 536#define EE_READ256_CMD	(6 << 8)
 537#define EE_ERASE_CMD	(7 << 6)
 538
 539static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 540{
 541	void __iomem *ioaddr = ep->ioaddr;
 542
 543	ew32(INTMASK, 0x00000000);
 544}
 545
 546static inline void __epic_pci_commit(void __iomem *ioaddr)
 547{
 548#ifndef USE_IO_OPS
 549	er32(INTMASK);
 550#endif
 551}
 552
 553static inline void epic_napi_irq_off(struct net_device *dev,
 554				     struct epic_private *ep)
 555{
 556	void __iomem *ioaddr = ep->ioaddr;
 557
 558	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
 559	__epic_pci_commit(ioaddr);
 560}
 561
 562static inline void epic_napi_irq_on(struct net_device *dev,
 563				    struct epic_private *ep)
 564{
 565	void __iomem *ioaddr = ep->ioaddr;
 566
 567	/* No need to commit possible posted write */
 568	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
 569}
 570
 571static int read_eeprom(struct epic_private *ep, int location)
 572{
 573	void __iomem *ioaddr = ep->ioaddr;
 574	int i;
 575	int retval = 0;
 576	int read_cmd = location |
 577		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
 578
 579	ew32(EECTL, EE_ENB & ~EE_CS);
 580	ew32(EECTL, EE_ENB);
 581
 582	/* Shift the read command bits out. */
 583	for (i = 12; i >= 0; i--) {
 584		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
 585		ew32(EECTL, EE_ENB | dataval);
 586		eeprom_delay();
 587		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
 588		eeprom_delay();
 589	}
 590	ew32(EECTL, EE_ENB);
 591
 592	for (i = 16; i > 0; i--) {
 593		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
 594		eeprom_delay();
 595		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
 596		ew32(EECTL, EE_ENB);
 597		eeprom_delay();
 598	}
 599
 600	/* Terminate the EEPROM access. */
 601	ew32(EECTL, EE_ENB & ~EE_CS);
 602	return retval;
 603}
 604
 605#define MII_READOP		1
 606#define MII_WRITEOP		2
 607static int mdio_read(struct net_device *dev, int phy_id, int location)
 608{
 609	struct epic_private *ep = netdev_priv(dev);
 610	void __iomem *ioaddr = ep->ioaddr;
 611	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
 612	int i;
 613
 614	ew32(MIICtrl, read_cmd);
 615	/* Typical operation takes 25 loops. */
 616	for (i = 400; i > 0; i--) {
 617		barrier();
 618		if ((er32(MIICtrl) & MII_READOP) == 0) {
 619			/* Work around read failure bug. */
 620			if (phy_id == 1 && location < 6 &&
 621			    er16(MIIData) == 0xffff) {
 622				ew32(MIICtrl, read_cmd);
 623				continue;
 624			}
 625			return er16(MIIData);
 626		}
 627	}
 628	return 0xffff;
 629}
 630
 631static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
 632{
 633	struct epic_private *ep = netdev_priv(dev);
 634	void __iomem *ioaddr = ep->ioaddr;
 635	int i;
 636
 637	ew16(MIIData, value);
 638	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
 639	for (i = 10000; i > 0; i--) {
 640		barrier();
 641		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
 642			break;
 643	}
 644}
 645
 646
 647static int epic_open(struct net_device *dev)
 648{
 649	struct epic_private *ep = netdev_priv(dev);
 650	void __iomem *ioaddr = ep->ioaddr;
 651	const int irq = ep->pci_dev->irq;
 652	int rc, i;
 653
 654	/* Soft reset the chip. */
 655	ew32(GENCTL, 0x4001);
 656
 657	napi_enable(&ep->napi);
 658	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
 659	if (rc) {
 660		napi_disable(&ep->napi);
 661		return rc;
 662	}
 663
 664	epic_init_ring(dev);
 665
 666	ew32(GENCTL, 0x4000);
 667	/* This magic is documented in SMSC app note 7.15 */
 668	for (i = 16; i > 0; i--)
 669		ew32(TEST1, 0x0008);
 670
 671	/* Pull the chip out of low-power mode, enable interrupts, and set for
 672	   PCI read multiple.  The MIIcfg setting and strange write order are
 673	   required by the details of which bits are reset and the transceiver
 674	   wiring on the Ositech CardBus card.
 675	*/
 676#if 0
 677	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 678#endif
 679	if (ep->chip_flags & MII_PWRDWN)
 680		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 681
 682	/* Tell the chip to byteswap descriptors on big-endian hosts */
 683#ifdef __BIG_ENDIAN
 684	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
 685	er32(GENCTL);
 686	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 687#else
 688	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
 689	er32(GENCTL);
 690	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 691#endif
 692
 693	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
 694
 695	for (i = 0; i < 3; i++)
 696		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 697
 698	ep->tx_threshold = TX_FIFO_THRESH;
 699	ew32(TxThresh, ep->tx_threshold);
 700
 701	if (media2miictl[dev->if_port & 15]) {
 702		if (ep->mii_phy_cnt)
 703			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
 704		if (dev->if_port == 1) {
 705			if (debug > 1)
 706				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
 707					    mdio_read(dev, ep->phys[0], MII_BMSR));
 
 708		}
 709	} else {
 710		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
 711		if (mii_lpa != 0xffff) {
 712			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
 713				ep->mii.full_duplex = 1;
 714			else if (! (mii_lpa & LPA_LPACK))
 715				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
 716			if (debug > 1)
 717				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
 718					    ep->mii.full_duplex ? "full"
 719								: "half",
 720					    ep->phys[0], mii_lpa);
 721		}
 722	}
 723
 724	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
 725	ew32(PRxCDAR, ep->rx_ring_dma);
 726	ew32(PTxCDAR, ep->tx_ring_dma);
 727
 728	/* Start the chip's Rx process. */
 729	set_rx_mode(dev);
 730	ew32(COMMAND, StartRx | RxQueued);
 731
 732	netif_start_queue(dev);
 733
 734	/* Enable interrupts by setting the interrupt mask. */
 735	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
 736	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
 737	     TxUnderrun);
 738
 739	if (debug > 1) {
 740		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
 741			   ioaddr, irq, er32(GENCTL),
 742			   ep->mii.full_duplex ? "full" : "half");
 
 743	}
 744
 745	/* Set the timer to switch to check for link beat and perhaps switch
 746	   to an alternate media type. */
 747	timer_setup(&ep->timer, epic_timer, 0);
 748	ep->timer.expires = jiffies + 3*HZ;
 
 
 749	add_timer(&ep->timer);
 750
 751	return rc;
 752}
 753
 754/* Reset the chip to recover from a PCI transaction error.
 755   This may occur at interrupt time. */
 756static void epic_pause(struct net_device *dev)
 757{
 758	struct net_device_stats *stats = &dev->stats;
 759	struct epic_private *ep = netdev_priv(dev);
 760	void __iomem *ioaddr = ep->ioaddr;
 761
 762	netif_stop_queue (dev);
 763
 764	/* Disable interrupts by clearing the interrupt mask. */
 765	ew32(INTMASK, 0x00000000);
 766	/* Stop the chip's Tx and Rx DMA processes. */
 767	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
 768
 769	/* Update the error counts. */
 770	if (er16(COMMAND) != 0xffff) {
 771		stats->rx_missed_errors	+= er8(MPCNT);
 772		stats->rx_frame_errors	+= er8(ALICNT);
 773		stats->rx_crc_errors	+= er8(CRCCNT);
 774	}
 775
 776	/* Remove the packets on the Rx queue. */
 777	epic_rx(dev, RX_RING_SIZE);
 778}
 779
 780static void epic_restart(struct net_device *dev)
 781{
 782	struct epic_private *ep = netdev_priv(dev);
 783	void __iomem *ioaddr = ep->ioaddr;
 784	int i;
 785
 786	/* Soft reset the chip. */
 787	ew32(GENCTL, 0x4001);
 788
 789	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
 790		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
 791	udelay(1);
 792
 793	/* This magic is documented in SMSC app note 7.15 */
 794	for (i = 16; i > 0; i--)
 795		ew32(TEST1, 0x0008);
 796
 797#ifdef __BIG_ENDIAN
 798	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
 799#else
 800	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
 801#endif
 802	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
 803	if (ep->chip_flags & MII_PWRDWN)
 804		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
 805
 806	for (i = 0; i < 3; i++)
 807		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
 808
 809	ep->tx_threshold = TX_FIFO_THRESH;
 810	ew32(TxThresh, ep->tx_threshold);
 811	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
 812	ew32(PRxCDAR, ep->rx_ring_dma +
 813	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
 814	ew32(PTxCDAR, ep->tx_ring_dma +
 815	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
 816
 817	/* Start the chip's Rx process. */
 818	set_rx_mode(dev);
 819	ew32(COMMAND, StartRx | RxQueued);
 820
 821	/* Enable interrupts by setting the interrupt mask. */
 822	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
 823	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
 824	     TxUnderrun);
 825
 826	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
 827		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
 
 828}
 829
 830static void check_media(struct net_device *dev)
 831{
 832	struct epic_private *ep = netdev_priv(dev);
 833	void __iomem *ioaddr = ep->ioaddr;
 834	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
 835	int negotiated = mii_lpa & ep->mii.advertising;
 836	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 837
 838	if (ep->mii.force_media)
 839		return;
 840	if (mii_lpa == 0xffff)		/* Bogus read */
 841		return;
 842	if (ep->mii.full_duplex != duplex) {
 843		ep->mii.full_duplex = duplex;
 844		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
 845			    ep->mii.full_duplex ? "full" : "half",
 846			    ep->phys[0], mii_lpa);
 847		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 848	}
 849}
 850
 851static void epic_timer(struct timer_list *t)
 852{
 853	struct epic_private *ep = from_timer(ep, t, timer);
 854	struct net_device *dev = ep->mii.dev;
 855	void __iomem *ioaddr = ep->ioaddr;
 856	int next_tick = 5*HZ;
 857
 858	if (debug > 3) {
 859		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
 860			   er32(TxSTAT));
 861		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
 862			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 
 863	}
 864
 865	check_media(dev);
 866
 867	ep->timer.expires = jiffies + next_tick;
 868	add_timer(&ep->timer);
 869}
 870
 871static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
 872{
 873	struct epic_private *ep = netdev_priv(dev);
 874	void __iomem *ioaddr = ep->ioaddr;
 875
 876	if (debug > 0) {
 877		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
 878			    er16(TxSTAT));
 879		if (debug > 1) {
 880			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
 881				   ep->dirty_tx, ep->cur_tx);
 882		}
 883	}
 884	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
 885		dev->stats.tx_fifo_errors++;
 886		ew32(COMMAND, RestartTx);
 887	} else {
 888		epic_restart(dev);
 889		ew32(COMMAND, TxQueued);
 890	}
 891
 892	netif_trans_update(dev); /* prevent tx timeout */
 893	dev->stats.tx_errors++;
 894	if (!ep->tx_full)
 895		netif_wake_queue(dev);
 896}
 897
 898/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
 899static void epic_init_ring(struct net_device *dev)
 900{
 901	struct epic_private *ep = netdev_priv(dev);
 902	int i;
 903
 904	ep->tx_full = 0;
 905	ep->dirty_tx = ep->cur_tx = 0;
 906	ep->cur_rx = ep->dirty_rx = 0;
 907	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
 908
 909	/* Initialize all Rx descriptors. */
 910	for (i = 0; i < RX_RING_SIZE; i++) {
 911		ep->rx_ring[i].rxstatus = 0;
 912		ep->rx_ring[i].buflength = ep->rx_buf_sz;
 913		ep->rx_ring[i].next = ep->rx_ring_dma +
 914				      (i+1)*sizeof(struct epic_rx_desc);
 915		ep->rx_skbuff[i] = NULL;
 916	}
 917	/* Mark the last entry as wrapping the ring. */
 918	ep->rx_ring[i-1].next = ep->rx_ring_dma;
 919
 920	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
 921	for (i = 0; i < RX_RING_SIZE; i++) {
 922		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
 923		ep->rx_skbuff[i] = skb;
 924		if (skb == NULL)
 925			break;
 926		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 927		ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
 928							skb->data,
 929							ep->rx_buf_sz,
 930							DMA_FROM_DEVICE);
 931		ep->rx_ring[i].rxstatus = DescOwn;
 932	}
 933	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 934
 935	/* The Tx buffer descriptor is filled in as needed, but we
 936	   do need to clear the ownership bit. */
 937	for (i = 0; i < TX_RING_SIZE; i++) {
 938		ep->tx_skbuff[i] = NULL;
 939		ep->tx_ring[i].txstatus = 0x0000;
 940		ep->tx_ring[i].next = ep->tx_ring_dma +
 941			(i+1)*sizeof(struct epic_tx_desc);
 942	}
 943	ep->tx_ring[i-1].next = ep->tx_ring_dma;
 944}
 945
/*
 * Queue one skb on the Tx ring.  Runs under ep->lock so it cannot race
 * with ring state updates elsewhere in the driver.
 */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* Pad short frames up to the minimum Ethernet length. */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
						    skb->data, skb->len,
						    DMA_TO_DEVICE);
	/* Interrupt mitigation: only request a Tx-done interrupt at the
	   half-full mark or when the queue is about to fill up. */
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Writing txstatus passes ownership to the chip, so it must be
	   the last descriptor field written. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
 999
/*
 * Account one failed transmit in the interface statistics.  @status is
 * the Tx status word from the completed descriptor; several error bits
 * may be set at once.
 */
static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was an major error, log it. */
	if (debug > 1)
		netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
			   status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;	/* aborted transmit */
	if (status & 0x0008)
		stats->tx_carrier_errors++;	/* carrier lost */
	if (status & 0x0040)
		stats->tx_window_errors++;	/* late collision */
	if (status & 0x0010)
		stats->tx_fifo_errors++;	/* FIFO underrun */
}
1021
/*
 * Reclaim completed Tx descriptors: update statistics, unmap the DMA
 * buffers and free the skbs.  Called from the NAPI poll loop.
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		/* Status bit 0 flags a successfully transmitted frame. */
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		dma_unmap_single(&ep->pci_dev->dev,
				 ep->tx_ring[entry].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_consume_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
			    dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	/* Wake the queue once enough ring entries have been reclaimed. */
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1069
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	/* Nothing of ours pending — probably a shared-IRQ neighbour. */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Rx/Tx work is deferred to NAPI; mask those sources until
	   epic_poll() re-enables them. */
	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* All-ones status means the chip is gone (CardBus removal). */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the start-of-transmit threshold before retrying. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
1140
/*
 * Receive up to @budget packets from the Rx ring, then refill the ring.
 * Returns the amount of work done (packets received plus buffers
 * refilled); called from the NAPI poll loop.
 */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	/* Never examine more entries than there are filled buffers. */
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, "  epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				/* Small frame: copy into a fresh skb and keep
				   the ring buffer mapped in place. */
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&ep->pci_dev->dev,
							ep->rx_ring[entry].bufaddr,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&ep->pci_dev->dev,
							   ep->rx_ring[entry].bufaddr,
							   ep->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				/* Large frame: pass the ring buffer itself up
				   the stack; the refill loop below allocates
				   a replacement. */
				dma_unmap_single(&ep->pci_dev->dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
								    skb->data,
								    ep->rx_buf_sz,
								    DMA_FROM_DEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1238
1239static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1240{
1241	void __iomem *ioaddr = ep->ioaddr;
1242	int status;
1243
1244	status = er32(INTSTAT);
1245
1246	if (status == EpicRemoved)
1247		return;
1248	if (status & RxOverflow) 	/* Missed a Rx frame. */
1249		dev->stats.rx_errors++;
1250	if (status & (RxOverflow | RxFull))
1251		ew16(COMMAND, RxQueued);
1252}
1253
1254static int epic_poll(struct napi_struct *napi, int budget)
1255{
1256	struct epic_private *ep = container_of(napi, struct epic_private, napi);
1257	struct net_device *dev = ep->mii.dev;
 
1258	void __iomem *ioaddr = ep->ioaddr;
1259	int work_done;
 
1260
1261	epic_tx(dev, ep);
1262
1263	work_done = epic_rx(dev, budget);
1264
1265	epic_rx_err(dev, ep);
1266
1267	if (work_done < budget && napi_complete_done(napi, work_done)) {
1268		unsigned long flags;
 
 
 
1269
1270		spin_lock_irqsave(&ep->napi_lock, flags);
1271
1272		ew32(INTSTAT, EpicNapiEvent);
1273		epic_napi_irq_on(dev, ep);
 
 
 
 
 
 
1274		spin_unlock_irqrestore(&ep->napi_lock, flags);
 
 
 
1275	}
1276
1277	return work_done;
1278}
1279
/*
 * ndo_stop: quiesce the hardware and release all ring buffers,
 * reversing the work done in epic_open().
 */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	/* Stop the media-check timer before tearing anything down. */
	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Free any skbs still waiting on the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1331
1332static struct net_device_stats *epic_get_stats(struct net_device *dev)
1333{
1334	struct epic_private *ep = netdev_priv(dev);
1335	void __iomem *ioaddr = ep->ioaddr;
1336
1337	if (netif_running(dev)) {
1338		struct net_device_stats *stats = &dev->stats;
1339
1340		stats->rx_missed_errors	+= er8(MPCNT);
1341		stats->rx_frame_errors	+= er8(ALICNT);
1342		stats->rx_crc_errors	+= er8(CRCCNT);
1343	}
1344
1345	return &dev->stats;
1346}
1347
1348/* Set or clear the multicast filter for this adaptor.
1349   Note that we only use exclusion around actually queueing the
1350   new frame, not around filling ep->setup_frame.  This is non-deterministic
1351   when re-entered but still correct. */
1352
1353static void set_rx_mode(struct net_device *dev)
1354{
1355	struct epic_private *ep = netdev_priv(dev);
1356	void __iomem *ioaddr = ep->ioaddr;
1357	unsigned char mc_filter[8];		 /* Multicast hash filter */
1358	int i;
1359
1360	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1361		ew32(RxCtrl, 0x002c);
1362		/* Unconditionally log net taps. */
1363		memset(mc_filter, 0xff, sizeof(mc_filter));
1364	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1365		/* There is apparently a chip bug, so the multicast filter
1366		   is never enabled. */
1367		/* Too many to filter perfectly -- accept all multicasts. */
1368		memset(mc_filter, 0xff, sizeof(mc_filter));
1369		ew32(RxCtrl, 0x000c);
1370	} else if (netdev_mc_empty(dev)) {
1371		ew32(RxCtrl, 0x0004);
1372		return;
1373	} else {					/* Never executed, for now. */
1374		struct netdev_hw_addr *ha;
1375
1376		memset(mc_filter, 0, sizeof(mc_filter));
1377		netdev_for_each_mc_addr(ha, dev) {
1378			unsigned int bit_nr =
1379				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1380			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1381		}
1382	}
1383	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1384	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1385		for (i = 0; i < 4; i++)
1386			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1387		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1388	}
1389}
1390
1391static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1392{
1393	struct epic_private *np = netdev_priv(dev);
1394
1395	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1396	strscpy(info->version, DRV_VERSION, sizeof(info->version));
1397	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1398}
1399
1400static int netdev_get_link_ksettings(struct net_device *dev,
1401				     struct ethtool_link_ksettings *cmd)
1402{
1403	struct epic_private *np = netdev_priv(dev);
 
1404
1405	spin_lock_irq(&np->lock);
1406	mii_ethtool_get_link_ksettings(&np->mii, cmd);
1407	spin_unlock_irq(&np->lock);
1408
1409	return 0;
1410}
1411
1412static int netdev_set_link_ksettings(struct net_device *dev,
1413				     const struct ethtool_link_ksettings *cmd)
1414{
1415	struct epic_private *np = netdev_priv(dev);
1416	int rc;
1417
1418	spin_lock_irq(&np->lock);
1419	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1420	spin_unlock_irq(&np->lock);
1421
1422	return rc;
1423}
1424
1425static int netdev_nway_reset(struct net_device *dev)
1426{
1427	struct epic_private *np = netdev_priv(dev);
1428	return mii_nway_restart(&np->mii);
1429}
1430
1431static u32 netdev_get_link(struct net_device *dev)
1432{
1433	struct epic_private *np = netdev_priv(dev);
1434	return mii_link_ok(&np->mii);
1435}
1436
/* ethtool: the module-wide 'debug' level doubles as the message level. */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1441
/* ethtool: setting the message level updates the shared 'debug' knob. */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1446
/*
 * ethtool entry hook.  A nesting counter keeps balanced begin/complete
 * pairs from powering the chip up more than once; the outermost call
 * powers it up if the interface is down.
 */
static int ethtool_begin(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* Refuse rather than let the nesting counter wrap around. */
	if (ep->ethtool_ops_nesting == U32_MAX)
		return -EBUSY;
	/* power-up, if interface is down */
	if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}
	return 0;
}
1461
/*
 * ethtool exit hook, paired with ethtool_begin(): the outermost
 * complete powers the chip back down if the interface is down.
 */
static void ethtool_complete(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-down, if interface is down */
	if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
}
1473
/* ethtool operations.  The begin/complete hooks bracket every call so
   the chip is powered up for register access while the interface is
   down. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
1485
/*
 * MII ioctl handler (SIOC[GS]MIIxxx).  If the interface is down the
 * chip is powered up (GENCTL/NVCTL writes) around the MII access and
 * powered back down afterwards.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
	return rc;
}
1511
1512
/* PCI remove callback: unwind everything the probe routine set up,
   in reverse order. */
static void epic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
	pci_iounmap(pdev, ep->ioaddr);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* pci_power_off(pdev, -1); */
}
1529
/* PM suspend: quiesce the chip and drop it into low-power mode.
   Nothing to do if the interface was never brought up. */
static int __maybe_unused epic_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}
1544
1545
1546static int __maybe_unused epic_resume(struct device *dev_d)
1547{
1548	struct net_device *dev = dev_get_drvdata(dev_d);
1549
1550	if (!netif_running(dev))
1551		return 0;
1552	epic_restart(dev);
1553	/* pci_power_on(pdev); */
1554	return 0;
1555}
1556
/* Suspend/resume callbacks wrapped in a dev_pm_ops structure. */
static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);

/* PCI driver glue: binds epic_pci_tbl entries to the probe/remove/PM
   routines above. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
	.driver.pm	= &epic_pm_ops,
};
1566
1567
/* Module entry point: print the banner (module builds only) and
   register the PCI driver. */
static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}
1577
1578
/* Module exit point: unregister the PCI driver. */
static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);
v3.5.6
   1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
   2/*
   3	Written/copyright 1997-2001 by Donald Becker.
   4
   5	This software may be used and distributed according to the terms of
   6	the GNU General Public License (GPL), incorporated herein by reference.
   7	Drivers based on or derived from this code fall under the GPL and must
   8	retain the authorship, copyright and license notice.  This file is not
   9	a complete program and may only be used when the entire operating
  10	system is licensed under the GPL.
  11
  12	This driver is for the SMC83c170/175 "EPIC" series, as used on the
  13	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
  14
  15	The author may be reached as becker@scyld.com, or C/O
  16	Scyld Computing Corporation
  17	410 Severn Ave., Suite 210
  18	Annapolis MD 21403
  19
  20	Information and updates available at
  21	http://www.scyld.com/network/epic100.html
  22	[this link no longer provides anything useful -jgarzik]
  23
  24	---------------------------------------------------------------------
  25
  26*/
  27
  28#define DRV_NAME        "epic100"
  29#define DRV_VERSION     "2.1"
  30#define DRV_RELDATE     "Sept 11, 2006"
  31
  32/* The user-configurable values.
  33   These may be modified when a driver module is loaded.*/
  34
  35static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
  36
  37/* Used to pass the full-duplex flag, etc. */
  38#define MAX_UNITS 8		/* More are supported, limit only on options */
  39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
  41
  42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  43   Setting to > 1518 effectively disables this feature. */
  44static int rx_copybreak;
  45
  46/* Operational parameters that are set at compile time. */
  47
  48/* Keep the ring sizes a power of two for operational efficiency.
  49   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  50   Making the Tx ring too large decreases the effectiveness of channel
  51   bonding and packet priority.
  52   There are no ill effects from too-large receive rings. */
  53#define TX_RING_SIZE	256
  54#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
  55#define RX_RING_SIZE	256
  56#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct epic_tx_desc)
  57#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct epic_rx_desc)
  58
  59/* Operational parameters that usually are not changed. */
  60/* Time in jiffies before concluding the transmitter is hung. */
  61#define TX_TIMEOUT  (2*HZ)
  62
  63#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
  64
  65/* Bytes transferred to chip before transmission starts. */
  66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
  67#define TX_FIFO_THRESH 256
  68#define RX_FIFO_THRESH 1		/* 0-3, 0==32, 64,96, or 3==128 bytes  */
  69
  70#include <linux/module.h>
  71#include <linux/kernel.h>
  72#include <linux/string.h>
  73#include <linux/timer.h>
  74#include <linux/errno.h>
  75#include <linux/ioport.h>
  76#include <linux/interrupt.h>
  77#include <linux/pci.h>
  78#include <linux/delay.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/spinlock.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/crc32.h>
  87#include <linux/bitops.h>
  88#include <asm/io.h>
  89#include <asm/uaccess.h>
  90#include <asm/byteorder.h>
  91
  92/* These identify the driver base version and may not be removed. */
  93static char version[] __devinitdata =
  94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
  95static char version2[] __devinitdata =
  96"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
  97
  98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
  99MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
 100MODULE_LICENSE("GPL");
 101
 102module_param(debug, int, 0);
 103module_param(rx_copybreak, int, 0);
 104module_param_array(options, int, NULL, 0);
 105module_param_array(full_duplex, int, NULL, 0);
 106MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
 107MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
 108MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
 109MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
 110
 111/*
 112				Theory of Operation
 113
 114I. Board Compatibility
 115
 116This device driver is designed for the SMC "EPIC/100", the SMC
 117single-chip Ethernet controllers for PCI.  This chip is used on
 118the SMC EtherPower II boards.
 119
 120II. Board-specific settings
 121
 122PCI bus devices are configured by the system at boot time, so no jumpers
 123need to be set on the board.  The system BIOS will assign the
 124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
 125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
 126interrupt lines.
 127
 128III. Driver operation
 129
 130IIIa. Ring buffers
 131
 132IVb. References
 133
 134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
 135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
 136http://scyld.com/expert/NWay.html
 137http://www.national.com/pf/DP/DP83840A.html
 138
 139IVc. Errata
 140
 141*/
 142
 143
/* Per-chip capability flags stored in epic_chip_info.drv_flags. */
enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

/* Minimum size of the chip's PCI register region (checked in probe). */
#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

/* With USE_IO_OPS the chip is accessed through BAR 0, else BAR 1. */
#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

/* Indices into pci_id_tbl[], carried in epic_pci_tbl[].driver_data. */
typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
        int drv_flags;                          /* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


/* PCI vendor/device IDs this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 184
/* Register accessors; a local 'ioaddr' must be in scope at each call site. */
#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};

/* Bits written to the COMMAND register. */
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

/* Interrupt sources serviced by the NAPI poll loop; everything else is
   handled directly in the interrupt handler. */
#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

/* MII control words indexed by the media-type option bits; zero
   entries appear unused — confirm against the media setup code. */
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
 225
/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;			/* DescOwn plus completion status. */
	u32 bufaddr;			/* Bus address of the frame data. */
	u32 buflength;			/* Control bits | frame length. */
	u32 next;			/* Bus address of next descriptor. */
};

struct epic_rx_desc {
	u32 rxstatus;			/* DescOwn plus receive status. */
	u32 bufaddr;			/* Bus address of the Rx buffer. */
	u32 buflength;			/* Size of the Rx buffer. */
	u32 next;			/* Bus address of next descriptor. */
};

enum desc_status_bits {
	DescOwn=0x8000,			/* Descriptor belongs to the chip. */
};

#define PRIV_ALIGN	15 	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;			/* Bus address of tx_ring. */
	dma_addr_t rx_ring_dma;			/* Bus address of rx_ring. */

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;		/* Tx producer / consumer. */

	unsigned int cur_rx, dirty_rx;		/* Rx consumer / refill. */
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	void __iomem *ioaddr;			/* Mapped register window. */
	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];		/* Last multicast hash written. */
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;

	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
 288
 289static int epic_open(struct net_device *dev);
 290static int read_eeprom(struct epic_private *, int);
 291static int mdio_read(struct net_device *dev, int phy_id, int location);
 292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
 293static void epic_restart(struct net_device *dev);
 294static void epic_timer(unsigned long data);
 295static void epic_tx_timeout(struct net_device *dev);
 296static void epic_init_ring(struct net_device *dev);
 297static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
 298				   struct net_device *dev);
 299static int epic_rx(struct net_device *dev, int budget);
 300static int epic_poll(struct napi_struct *napi, int budget);
 301static irqreturn_t epic_interrupt(int irq, void *dev_instance);
 302static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 303static const struct ethtool_ops netdev_ethtool_ops;
 304static int epic_close(struct net_device *dev);
 305static struct net_device_stats *epic_get_stats(struct net_device *dev);
 306static void set_rx_mode(struct net_device *dev);
 307
/* net_device callbacks wired up in epic_init_one(). */
static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
 320
/*
 * PCI probe for a single EPIC board: enable the device, map the register
 * window, allocate coherent Tx/Rx descriptor rings, read the station
 * address, scan the MII bus and register the net_device.
 *
 * Returns 0 on success or a negative errno; every acquired resource is
 * unwound on failure via the goto ladder at the bottom.
 */
static int __devinit epic_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	static int card_idx = -1;	/* index into options[]/full_duplex[] */
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s%s", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	/* BAR 0 must be large enough to hold the whole register window. */
	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	/* Hook the generic MII helper library into our MDIO accessors. */
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	/* Coherent DMA memory for the Tx ring, then the Rx ring. */
	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	/* Media/duplex overrides: dev->mem_start wins, else the per-unit
	   module parameters. */
	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	/* Station address is exposed in the LAN0..LAN2 registers, 16 bits
	   apiece, spaced 4 bytes apart. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ep, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		/* NOTE(review): the bound is sizeof(ep->phys) — the size in
		   bytes, not the element count.  Harmless only if phys[] has
		   byte-sized elements; confirm against the struct definition. */
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000/0xffff mean no PHY responded at this address. */
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
	       dev->name, pci_id_tbl[chip_idx].name,
	       (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
	       dev->dev_addr);

out:
	return ret;

/* Error unwind: reverse order of acquisition above. */
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}
 516
/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits (in the EECTL register). */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus (a dummy read of
   EECTL forces any posted write out).
 */

#define eeprom_delay()	er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
 539
 540static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
 541{
 542	void __iomem *ioaddr = ep->ioaddr;
 543
 544	ew32(INTMASK, 0x00000000);
 545}
 546
/* Flush any posted PCI write by performing a dummy read of INTMASK.
 * Not needed (and not possible the same way) when using I/O-port ops. */
static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}
 553
/* Mask the NAPI-handled interrupt sources while a poll is scheduled.
 * The commit read makes sure the mask write reaches the chip before we
 * return, so no further NAPI interrupts can race the scheduled poll. */
static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}
 562
/* Re-enable the NAPI interrupt sources once polling is complete. */
static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}
 571
/*
 * Bit-bang one 16-bit word out of the serial EEPROM at @location.
 * The EECTL "size" bit (0x40) selects between the 64-word and 256-word
 * command formats.  Each eeprom_delay() is a flushing read, so the
 * clock edges actually reach the part in order.
 */
static int __devinit read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	/* Pulse chip select to start a new transaction. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out, MSB first. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	/* Clock the 16 data bits back in, MSB first. */
	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}
 605
/* MIICtrl operation bits: the chip clears them when the op completes. */
#define MII_READOP		1
#define MII_WRITEOP		2

/*
 * Read one MII management register; returns 0xffff if the operation
 * never completes.  Polls MIICtrl until the chip clears MII_READOP.
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug: retry when PHY 1
			   registers 0-5 come back all-ones. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}
 631
/*
 * Write one MII management register.  Data goes into MIIData first,
 * then the command starts the transaction; we poll until the chip
 * clears MII_WRITEOP (completion is not otherwise reported).
 */
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}
 646
 647
/*
 * ndo_open: reset and program the chip, set up the descriptor rings,
 * hook the interrupt, start Rx and kick off the link-monitor timer.
 * The register write sequence below is order-sensitive (see the inline
 * comments); do not reorder casually.
 */
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	/* Load the station address into the LAN0..LAN2 registers. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	if (media2miictl[dev->if_port & 15]) {
		/* User forced a media type: program the PHY accordingly. */
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		/* Autonegotiation: derive duplex from the link partner, or
		   restart negotiation if the partner never acknowledged. */
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
		       "status %4.4x %s-duplex.\n",
		       dev->name, ioaddr, irq, er32(GENCTL),
		       ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	/* rc is 0 here: request_irq() succeeded or we bailed earlier. */
	return rc;
}
 758
/* Quiesce the chip: mask interrupts, stop the Tx/Rx DMA engines,
   harvest the hardware error counters and drain any remaining Rx
   packets.  Used on close and to recover from a PCI transaction error,
   so it may run at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts; an all-ones COMMAND read means the
	   chip is gone (e.g. hot eject), so skip the counters then. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}
 784
/*
 * Re-initialize the chip after a fault (PCI bus error, Tx timeout)
 * without touching the descriptor rings: the ring pointers are
 * reprogrammed to the current cur_rx/dirty_tx positions so in-flight
 * bookkeeping stays valid.  Mirrors the init sequence in epic_open().
 */
static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Byteswap-descriptors bit for big-endian hosts, as in epic_open(). */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Reload the station address. */
	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	/* Point the chip at the *current* ring positions, not the start. */
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}
 835
 836static void check_media(struct net_device *dev)
 837{
 838	struct epic_private *ep = netdev_priv(dev);
 839	void __iomem *ioaddr = ep->ioaddr;
 840	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
 841	int negotiated = mii_lpa & ep->mii.advertising;
 842	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
 843
 844	if (ep->mii.force_media)
 845		return;
 846	if (mii_lpa == 0xffff)		/* Bogus read */
 847		return;
 848	if (ep->mii.full_duplex != duplex) {
 849		ep->mii.full_duplex = duplex;
 850		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
 851			   " partner capability of %4.4x.\n", dev->name,
 852			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
 853		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
 854	}
 855}
 856
 857static void epic_timer(unsigned long data)
 858{
 859	struct net_device *dev = (struct net_device *)data;
 860	struct epic_private *ep = netdev_priv(dev);
 861	void __iomem *ioaddr = ep->ioaddr;
 862	int next_tick = 5*HZ;
 863
 864	if (debug > 3) {
 865		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
 866		       dev->name, er32(TxSTAT));
 867		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
 868		       "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
 869		       er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
 870	}
 871
 872	check_media(dev);
 873
 874	ep->timer.expires = jiffies + next_tick;
 875	add_timer(&ep->timer);
 876}
 877
/*
 * ndo_tx_timeout: the stack noticed the Tx queue stalled.  A plain
 * FIFO underflow just needs a transmit restart; anything else gets a
 * full chip re-init via epic_restart().
 */
static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
		       "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}
 904
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
 * Rx descriptors are linked in a circle and handed to the chip
 * (DescOwn) once their skb is mapped; Tx descriptors are linked but
 * left owned by the CPU until epic_start_xmit() fills them. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully:
	   a short ring still works, dirty_rx records the deficit. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;	/* hand to the chip */
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}
 950
/*
 * ndo_start_xmit: map the skb, fill the next Tx descriptor and kick
 * the chip.  Interrupt-coalescing policy: most packets are queued with
 * no Tx-done interrupt; one is requested at the queue's half-way mark
 * and when the ring fills up.
 */
static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	/* The chip needs minimum-length frames; pad (or drop on failure). */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	/* NOTE: despite the name, free_count = cur_tx - dirty_tx is the
	   number of descriptors currently in flight, not the free ones. */
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	/* Writing txstatus with DescOwn hands the descriptor to the chip;
	   it must come after every other field above. */
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
		       "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
		       entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}
1004
1005static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1006			  int status)
1007{
1008	struct net_device_stats *stats = &dev->stats;
1009
1010#ifndef final_version
1011	/* There was an major error, log it. */
1012	if (debug > 1)
1013		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1014		       dev->name, status);
1015#endif
1016	stats->tx_errors++;
1017	if (status & 0x1050)
1018		stats->tx_aborted_errors++;
1019	if (status & 0x0008)
1020		stats->tx_carrier_errors++;
1021	if (status & 0x0040)
1022		stats->tx_window_errors++;
1023	if (status & 0x0010)
1024		stats->tx_fifo_errors++;
1025}
1026
/*
 * Reclaim completed Tx descriptors (called from the NAPI poll loop):
 * walk from dirty_tx to cur_tx, stop at the first descriptor the chip
 * still owns, account stats, unmap and free each skb, and re-wake the
 * queue once enough ring slots are available again.
 */
static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {	/* transmit OK bit */
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	/* Sanity check: more in flight than the ring holds means the
	   pointers went out of sync. */
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}
1074
1075/* The interrupt handler does all of the Rx thread work and cleans up
1076   after the Tx thread. */
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  NAPI-eligible events are deferred to
   epic_poll(); error/counter events are handled inline. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
		       "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
	}

	/* Shared-IRQ line: nothing of ours pending. */
	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	/* Defer Rx/Tx work to NAPI.  If a poll is already scheduled but
	   couldn't be prepped, flag it to re-run (reschedule_in_poll). */
	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		/* All-ones status means the chip vanished (hot eject). */
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			/* Raise the FIFO threshold and restart Tx. */
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}
1146
/*
 * Receive up to @budget packets from the Rx ring, then refill the ring.
 * Small frames (below rx_copybreak) are copied into a fresh skb so the
 * large mapped buffer can stay in the ring; larger ones are unmapped
 * and handed up directly.  Returns the number of descriptors processed
 * (receives plus refills) for the NAPI budget accounting.
 */
static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {	/* error bits: drop the frame */
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;	/* clamp to the max frame size */
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				/* Sync for CPU, copy, sync back: the ring
				   buffer stays mapped and owned by the ring. */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				/* Hand the ring skb straight up; the refill
				   loop below will replace it. */
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
1241
1242static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1243{
1244	void __iomem *ioaddr = ep->ioaddr;
1245	int status;
1246
1247	status = er32(INTSTAT);
1248
1249	if (status == EpicRemoved)
1250		return;
1251	if (status & RxOverflow) 	/* Missed a Rx frame. */
1252		dev->stats.rx_errors++;
1253	if (status & (RxOverflow | RxFull))
1254		ew16(COMMAND, RxQueued);
1255}
1256
/*
 * NAPI poll: reclaim Tx descriptors, receive up to @budget packets and
 * service Rx errors.  If an interrupt arrived while the poll was being
 * scheduled (reschedule_in_poll set by epic_interrupt()), loop instead
 * of completing; otherwise ack the NAPI events and re-enable the irq.
 */
static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	void __iomem *ioaddr = ep->ioaddr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__napi_complete(napi);
			ew32(INTSTAT, EpicNapiEvent);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}
1296
/*
 * ndo_stop: quiesce NAPI and the timer, mask interrupts, release the
 * IRQ, stop the chip (epic_pause), free every ring buffer and drop the
 * chip back into low-power mode.  Always returns 0.
 */
static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
		       dev->name, er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	/* Free any skbs still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}
1348
1349static struct net_device_stats *epic_get_stats(struct net_device *dev)
1350{
1351	struct epic_private *ep = netdev_priv(dev);
1352	void __iomem *ioaddr = ep->ioaddr;
1353
1354	if (netif_running(dev)) {
1355		struct net_device_stats *stats = &dev->stats;
1356
1357		stats->rx_missed_errors	+= er8(MPCNT);
1358		stats->rx_frame_errors	+= er8(ALICNT);
1359		stats->rx_crc_errors	+= er8(CRCCNT);
1360	}
1361
1362	return &dev->stats;
1363}
1364
1365/* Set or clear the multicast filter for this adaptor.
1366   Note that we only use exclusion around actually queueing the
1367   new frame, not around filling ep->setup_frame.  This is non-deterministic
1368   when re-entered but still correct. */
1369
1370static void set_rx_mode(struct net_device *dev)
1371{
1372	struct epic_private *ep = netdev_priv(dev);
1373	void __iomem *ioaddr = ep->ioaddr;
1374	unsigned char mc_filter[8];		 /* Multicast hash filter */
1375	int i;
1376
1377	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1378		ew32(RxCtrl, 0x002c);
1379		/* Unconditionally log net taps. */
1380		memset(mc_filter, 0xff, sizeof(mc_filter));
1381	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1382		/* There is apparently a chip bug, so the multicast filter
1383		   is never enabled. */
1384		/* Too many to filter perfectly -- accept all multicasts. */
1385		memset(mc_filter, 0xff, sizeof(mc_filter));
1386		ew32(RxCtrl, 0x000c);
1387	} else if (netdev_mc_empty(dev)) {
1388		ew32(RxCtrl, 0x0004);
1389		return;
1390	} else {					/* Never executed, for now. */
1391		struct netdev_hw_addr *ha;
1392
1393		memset(mc_filter, 0, sizeof(mc_filter));
1394		netdev_for_each_mc_addr(ha, dev) {
1395			unsigned int bit_nr =
1396				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1397			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1398		}
1399	}
1400	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
1401	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1402		for (i = 0; i < 4; i++)
1403			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1404		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1405	}
1406}
1407
1408static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1409{
1410	struct epic_private *np = netdev_priv(dev);
1411
1412	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1413	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1414	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1415}
1416
1417static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
1418{
1419	struct epic_private *np = netdev_priv(dev);
1420	int rc;
1421
1422	spin_lock_irq(&np->lock);
1423	rc = mii_ethtool_gset(&np->mii, cmd);
1424	spin_unlock_irq(&np->lock);
1425
1426	return rc;
1427}
1428
1429static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
1430{
1431	struct epic_private *np = netdev_priv(dev);
1432	int rc;
1433
1434	spin_lock_irq(&np->lock);
1435	rc = mii_ethtool_sset(&np->mii, cmd);
1436	spin_unlock_irq(&np->lock);
1437
1438	return rc;
1439}
1440
1441static int netdev_nway_reset(struct net_device *dev)
1442{
1443	struct epic_private *np = netdev_priv(dev);
1444	return mii_nway_restart(&np->mii);
1445}
1446
1447static u32 netdev_get_link(struct net_device *dev)
1448{
1449	struct epic_private *np = netdev_priv(dev);
1450	return mii_link_ok(&np->mii);
1451}
1452
/* ethtool: return the driver-wide debug level (shared by all units). */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}
1457
/* ethtool: set the driver-wide debug level (shared by all units). */
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}
1462
1463static int ethtool_begin(struct net_device *dev)
1464{
1465	struct epic_private *ep = netdev_priv(dev);
1466	void __iomem *ioaddr = ep->ioaddr;
1467
 
 
1468	/* power-up, if interface is down */
1469	if (!netif_running(dev)) {
1470		ew32(GENCTL, 0x0200);
1471		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1472	}
1473	return 0;
1474}
1475
1476static void ethtool_complete(struct net_device *dev)
1477{
1478	struct epic_private *ep = netdev_priv(dev);
1479	void __iomem *ioaddr = ep->ioaddr;
1480
1481	/* power-down, if interface is down */
1482	if (!netif_running(dev)) {
1483		ew32(GENCTL, 0x0008);
1484		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1485	}
1486}
1487
/* ethtool operations supported by this driver: driver info, MII-backed
 * link settings/autoneg/link state, the module-wide debug level, and
 * begin/complete hooks that power the chip up and down around accesses
 * made while the interface is closed. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};
1499
1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1501{
1502	struct epic_private *np = netdev_priv(dev);
1503	void __iomem *ioaddr = np->ioaddr;
1504	struct mii_ioctl_data *data = if_mii(rq);
1505	int rc;
1506
1507	/* power-up, if interface is down */
1508	if (! netif_running(dev)) {
1509		ew32(GENCTL, 0x0200);
1510		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1511	}
1512
1513	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1514	spin_lock_irq(&np->lock);
1515	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1516	spin_unlock_irq(&np->lock);
1517
1518	/* power-down, if interface is down */
1519	if (! netif_running(dev)) {
1520		ew32(GENCTL, 0x0008);
1521		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1522	}
1523	return rc;
1524}
1525
1526
1527static void __devexit epic_remove_one(struct pci_dev *pdev)
1528{
1529	struct net_device *dev = pci_get_drvdata(pdev);
1530	struct epic_private *ep = netdev_priv(dev);
1531
1532	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1534	unregister_netdev(dev);
 
 
 
 
1535	pci_iounmap(pdev, ep->ioaddr);
 
1536	pci_release_regions(pdev);
1537	free_netdev(dev);
1538	pci_disable_device(pdev);
1539	pci_set_drvdata(pdev, NULL);
1540	/* pci_power_off(pdev, -1); */
1541}
1542
1543
1544#ifdef CONFIG_PM
1545
/* PM suspend callback: if the interface is up, pause the chip and put
 * it into low-power mode.  A closed interface needs no action.
 * Always returns 0. */
static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}
1560
1561
/* PM resume callback: restart the chip if the interface was up when we
 * suspended.  Always returns 0. */
static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev))
		epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}
1572
1573#endif /* CONFIG_PM */
1574
1575
/* PCI driver glue: probe/remove plus optional suspend/resume hooks. */
static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};
1586
1587
1588static int __init epic_init (void)
1589{
1590/* when a module, this is printed whether or not devices are found in probe */
1591#ifdef MODULE
1592	printk (KERN_INFO "%s%s",
1593		version, version2);
1594#endif
1595
1596	return pci_register_driver(&epic_driver);
1597}
1598
1599
1600static void __exit epic_cleanup (void)
1601{
1602	pci_unregister_driver (&epic_driver);
1603}
1604
1605
module_init(epic_init);		/* hook driver registration to module load */
module_exit(epic_cleanup);	/* hook driver removal to module unload */