/*
	Written 1998-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/pci-skeleton.html

	Linux kernel updates:

	Version 2.51, Nov 17, 2001 (jgarzik):
	- Add ethtool support
	- Replace some MII-related magic numbers with constants

*/

#define DRV_NAME	"fealnx"

static int debug;		/* 1-> print debug message */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
/* Setting to > 1518 effectively disables this feature.          */
static int rx_copybreak;

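/*
 * Illustrative sketch (editor's addition, not driver code): how netdev_rx()
 * below applies rx_copybreak.  A frame shorter than the breakpoint is copied
 * into a freshly allocated skb so the large ring buffer can be reused; a
 * longer frame gives up the ring skb itself.  Compiled out with #if 0.
 */
#if 0
	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16 byte align the IP header */
		/* copy the tiny frame; the ring skb stays mapped for reuse */
		skb_copy_to_linear_data(skb, np->cur_rx->skbuff->data, pkt_len);
		skb_put(skb, pkt_len);
	} else {
		/* hand the ring skb up the stack; a new one is allocated later */
		skb_put(skb = np->cur_rx->skbuff, pkt_len);
		np->cur_rx->skbuff = NULL;
	}
#endif
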
/* Used to pass the media type, etc.                            */
/* Both 'options[]' and 'full_duplex[]' should exist for driver */
/* interoperability.                                            */
/* The media type is usually passed in 'options[]'.             */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };

/* Operational parameters that are set at compile time.                 */
/* Keep the ring sizes a power of two for compile efficiency.           */
/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
/* Making the Tx ring too large decreases the effectiveness of channel  */
/* bonding and packet priority.                                         */
/* There are no ill effects from too-large receive rings.               */
// 88-12-9 modify,
// #define TX_RING_SIZE    16
// #define RX_RING_SIZE    32
#define TX_RING_SIZE    6
#define RX_RING_SIZE    12
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct fealnx_desc))

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536	/* Size of each temporary Rx buffer. */


/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */
#ifndef __alpha__
#define USE_IO_OPS
#endif

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 9: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");

enum {
	MIN_REGION_SIZE		= 136,
};

/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {
	HAS_MII_XCVR,
	HAS_CHIP_XCVR,
};

/* 89/6/13 add, */
/* for different PHY */
enum phy_type_flags {
	MysonPHY = 1,
	AhdocPHY = 2,
	SeeqPHY = 3,
	MarvellPHY = 4,
	Myson981 = 5,
	LevelOnePHY = 6,
	OtherPHY = 10,
};

struct chip_info {
	char *chip_name;
	int flags;
};

static const struct chip_info skel_netdrv_tbl[] = {
	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
};

/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
	PAR0 = 0x0,		/* physical address 0-3 */
	PAR1 = 0x04,		/* physical address 4-5 */
	MAR0 = 0x08,		/* multicast address 0-3 */
	MAR1 = 0x0C,		/* multicast address 4-7 */
	FAR0 = 0x10,		/* flow-control address 0-3 */
	FAR1 = 0x14,		/* flow-control address 4-5 */
	TCRRCR = 0x18,		/* receive & transmit configuration */
	BCR = 0x1C,		/* bus command */
	TXPDR = 0x20,		/* transmit polling demand */
	RXPDR = 0x24,		/* receive polling demand */
	RXCWP = 0x28,		/* receive current word pointer */
	TXLBA = 0x2C,		/* transmit list base address */
	RXLBA = 0x30,		/* receive list base address */
	ISR = 0x34,		/* interrupt status */
	IMR = 0x38,		/* interrupt mask */
	FTH = 0x3C,		/* flow control high/low threshold */
	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
	TALLY = 0x44,		/* tally counters for crc and mpa */
	TSR = 0x48,		/* tally counter for transmit status */
	BMCRSR = 0x4c,		/* basic mode control and status */
	PHYIDENTIFIER = 0x50,	/* phy identifier */
	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
				   partner ability */
	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
};

/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	RFCON = 0x00020000,	/* receive flow control xon packet */
	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
	LSCStatus = 0x00008000,	/* link status change */
	ANCStatus = 0x00004000,	/* autonegotiation completed */
	FBE = 0x00002000,	/* fatal bus error */
	FBEMask = 0x00001800,	/* mask bit12-11 */
	ParityErr = 0x00000000,	/* parity error */
	TargetErr = 0x00001000,	/* target abort */
	MasterErr = 0x00000800,	/* master error */
	TUNF = 0x00000400,	/* transmit underflow */
	ROVF = 0x00000200,	/* receive overflow */
	ETI = 0x00000100,	/* transmit early int */
	ERI = 0x00000080,	/* receive early int */
	CNTOVF = 0x00000040,	/* counter overflow */
	RBU = 0x00000020,	/* receive buffer unavailable */
	TBU = 0x00000010,	/* transmit buffer unavailable */
	TI = 0x00000008,	/* transmit interrupt */
	RI = 0x00000004,	/* receive interrupt */
	RxErr = 0x00000002,	/* receive error */
};

/* Bits in the NetworkConfig register, W for writing, R for reading */
/* FIXME: some names are invented by me. Marked with (name?) */
/* If you have docs and know bit names, please fix 'em */
enum rx_mode_bits {
	CR_W_ENH	= 0x02000000,	/* enhanced mode (name?) */
	CR_W_FD		= 0x00100000,	/* full duplex */
	CR_W_PS10	= 0x00080000,	/* 10 mbit */
	CR_W_TXEN	= 0x00040000,	/* tx enable (name?) */
	CR_W_PS1000	= 0x00010000,	/* 1000 mbit */
     /* CR_W_RXBURSTMASK= 0x00000e00, I'm unsure about this */
	CR_W_RXMODEMASK	= 0x000000e0,
	CR_W_PROM	= 0x00000080,	/* promiscuous mode */
	CR_W_AB		= 0x00000040,	/* accept broadcast */
	CR_W_AM		= 0x00000020,	/* accept multicast */
	CR_W_ARP	= 0x00000008,	/* receive runt pkt */
	CR_W_ALP	= 0x00000004,	/* receive long pkt */
	CR_W_SEP	= 0x00000002,	/* receive error pkt */
	CR_W_RXEN	= 0x00000001,	/* rx enable (unicast?) (name?) */

	CR_R_TXSTOP	= 0x04000000,	/* tx stopped (name?) */
	CR_R_FD		= 0x00100000,	/* full duplex detected */
	CR_R_PS10	= 0x00080000,	/* 10 mbit detected */
	CR_R_RXSTOP	= 0x00008000,	/* rx stopped (name?) */
};

/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
	s32 status;
	s32 control;
	u32 buffer;
	u32 next_desc;
	struct fealnx_desc *next_desc_logical;
	struct sk_buff *skbuff;
	u32 reserved1;
	u32 reserved2;
};

/* Bits in network_desc.status */
enum rx_desc_status_bits {
	RXOWN = 0x80000000,	/* own bit */
	FLNGMASK = 0x0fff0000,	/* frame length */
	FLNGShift = 16,
	MARSTATUS = 0x00004000,	/* multicast address received */
	BARSTATUS = 0x00002000,	/* broadcast address received */
	PHYSTATUS = 0x00001000,	/* physical address received */
	RXFSD = 0x00000800,	/* first descriptor */
	RXLSD = 0x00000400,	/* last descriptor */
	ErrorSummary = 0x80,	/* error summary */
	RUNTPKT = 0x40,		/* runt packet received */
	LONGPKT = 0x20,		/* long packet received */
	FAE = 0x10,		/* frame align error */
	CRC = 0x08,		/* crc error */
	RXER = 0x04,		/* receive error */
};

enum rx_desc_control_bits {
	RXIC = 0x00800000,	/* interrupt control */
	RBSShift = 0,
};

enum tx_desc_status_bits {
	TXOWN = 0x80000000,	/* own bit */
	JABTO = 0x00004000,	/* jabber timeout */
	CSL = 0x00002000,	/* carrier sense lost */
	LC = 0x00001000,	/* late collision */
	EC = 0x00000800,	/* excessive collision */
	UDF = 0x00000400,	/* fifo underflow */
	DFR = 0x00000200,	/* deferred */
	HF = 0x00000100,	/* heartbeat fail */
	NCRMask = 0x000000ff,	/* collision retry count */
	NCRShift = 0,
};

enum tx_desc_control_bits {
	TXIC = 0x80000000,	/* interrupt control */
	ETIControl = 0x40000000,	/* early transmit interrupt */
	TXLD = 0x20000000,	/* last descriptor */
	TXFD = 0x10000000,	/* first descriptor */
	CRCEnable = 0x08000000,	/* crc control */
	PADEnable = 0x04000000,	/* padding control */
	RetryTxLC = 0x02000000,	/* retry late collision */
	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
	PKTSShift = 11,
	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
	TBSShift = 0,
};

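/*
 * Illustrative sketch (editor's addition, not driver code): how start_tx()
 * below packs a Tx control word for a 60-byte single-buffer frame.  Both the
 * packet size (bits 21-11) and the buffer size (bits 10-0) are encoded in
 * the same word.  Compiled out with #if 0.
 */
#if 0
	u32 control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;

	control |= 60 << PKTSShift;	/* packet size -> 0x0001e000 */
	control |= 60 << TBSShift;	/* buffer size -> 0x0000003c */
	/* control == 0xbc01e03c */
#endif
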
/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ       0x00000000
#define MASK_MIIR_MII_WRITE      0x00000008
#define MASK_MIIR_MII_MDO        0x00000004
#define MASK_MIIR_MII_MDI        0x00000002
#define MASK_MIIR_MII_MDC        0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ             0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE            0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */

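/*
 * Illustrative sketch (editor's addition, not driver code): the 16-bit MII
 * management frame that m80x_send_cmd_to_phy() shifts out, shown here for a
 * read of register 2 on PHY address 1 (hypothetical values).
 */
#if 0
	unsigned int frame = OP_READ | (1 << 7) | (2 << 2);
	/* 0x6000 | 0x0080 | 0x0008 == 0x6088:
	 * ST(01) OP(10) PHYAD(00001) REGAD(00010) TA(Z0) */
#endif
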
/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                              */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400	// bit10
#define FULLMODE        0x0800	// bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                         */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                          */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400

/* 89/6/13 add, */
/* -------------------------------------------------------------------------- */
/*      Constants                                                             */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0		0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg		17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000
#define Speed_10M       0
#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case, BMCRSR register */
#define LinkIsUp2	0x00040000

/* for PHY */
#define LinkIsUp        0x0004


struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct fealnx_desc *rx_ring;
	struct fealnx_desc *tx_ring;

	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	spinlock_t lock;

	/* Media monitoring timer. */
	struct timer_list timer;

	/* Reset timer */
	struct timer_list reset_timer;
	int reset_timer_armed;
	unsigned long crvalue_sv;
	unsigned long imrvalue_sv;

	/* Frequently used values: keep some adjacent for cache effect. */
	int flags;
	struct pci_dev *pci_dev;
	unsigned long crvalue;
	unsigned long bcrvalue;
	unsigned long imrvalue;
	struct fealnx_desc *cur_rx;
	struct fealnx_desc *lack_rxbuf;
	int really_rx_count;
	struct fealnx_desc *cur_tx;
	struct fealnx_desc *cur_tx_copy;
	int really_tx_count;
	int free_tx_count;
	unsigned int rx_buf_sz;	/* Based on MTU+slack. */

	/* These values keep track of the transceiver/media in use. */
	unsigned int linkok;
	unsigned int line_speed;
	unsigned int duplexmode;
	unsigned int default_port:4;	/* Last dev->if_port value. */
	unsigned int PHYType;

	/* MII transceiver section. */
	int mii_cnt;		/* number of MII transceivers found. */
	unsigned char phys[2];	/* MII device addresses. */
	struct mii_if_info mii;
	void __iomem *mem;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void reset_timer(struct timer_list *t);
static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
static void reset_tx_descriptors(struct net_device *dev);

static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~CR_W_RXEN, ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
			break;
	}
}


static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
{
	int delay = 0x1000;
	iowrite32(crvalue & ~(CR_W_RXEN | CR_W_TXEN), ioaddr + TCRRCR);
	while (--delay) {
		if ((ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP | CR_R_TXSTOP))
					    == (CR_R_RXSTOP | CR_R_TXSTOP))
			break;
	}
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_eth_ioctl		= mii_ioctl,
	.ndo_tx_timeout		= fealnx_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int fealnx_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, option, err, irq;
	static int card_idx = -1;
	char boardname[12];
	void __iomem *ioaddr;
	unsigned long len;
	unsigned int chip_id = ent->driver_data;
	struct net_device *dev;
	void *ring_space;
	dma_addr_t ring_dma;
	u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif

	card_idx++;
	sprintf(boardname, "fealnx%d", card_idx);

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;

	i = pci_enable_device(pdev);
	if (i)
		return i;
	pci_set_master(pdev);

	len = pci_resource_len(pdev, bar);
	if (len < MIN_REGION_SIZE) {
		dev_err(&pdev->dev,
			"region size %ld too small, aborting\n", len);
		return -ENODEV;
	}

	i = pci_request_regions(pdev, boardname);
	if (i)
		return i;

	irq = pdev->irq;

	ioaddr = pci_iomap(pdev, bar, len);
	if (!ioaddr) {
		err = -ENOMEM;
		goto err_out_res;
	}

	dev = alloc_etherdev(sizeof(struct netdev_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out_unmap;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* read ethernet id */
	for (i = 0; i < 6; ++i)
		addr[i] = ioread8(ioaddr + PAR0 + i);
	eth_hw_addr_set(dev, addr);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Make certain the descriptor lists are aligned. */
	np = netdev_priv(dev);
	np->mem = ioaddr;
	spin_lock_init(&np->lock);
	np->pci_dev = pdev;
	np->flags = skel_netdrv_tbl[chip_id].flags;
	pci_set_drvdata(pdev, dev);
	np->mii.dev = dev;
	np->mii.mdio_read = mdio_read;
	np->mii.mdio_write = mdio_write;
	np->mii.phy_id_mask = 0x1f;
	np->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space) {
		err = -ENOMEM;
		goto err_out_free_rx;
	}
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	/* find the connected MII xcvrs */
	if (np->flags == HAS_MII_XCVR) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
			       phy++) {
			int mii_status = mdio_read(dev, phy, 1);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					 "MII PHY found at address %d, status 0x%4.4x.\n",
					 phy, mii_status);
				/* get phy type */
				{
					unsigned int data;

					data = mdio_read(dev, np->phys[0], 2);
					if (data == SeeqPHYID0)
						np->PHYType = SeeqPHY;
					else if (data == AhdocPHYID0)
						np->PHYType = AhdocPHY;
					else if (data == MarvellPHYID0)
						np->PHYType = MarvellPHY;
					else if (data == MysonPHYID0)
						np->PHYType = Myson981;
					else if (data == LevelOnePHYID0)
						np->PHYType = LevelOnePHY;
					else
						np->PHYType = OtherPHY;
				}
			}
		}

		np->mii_cnt = phy_idx;
		if (phy_idx == 0)
			dev_warn(&pdev->dev,
				 "MII PHY not found -- this device may not operate correctly.\n");
	} else {
		np->phys[0] = 32;
/* 89/6/23 add, (begin) */
		/* get phy type */
		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
			np->PHYType = MysonPHY;
		else
			np->PHYType = OtherPHY;
	}
	np->mii.phy_id = np->phys[0];

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii.full_duplex = 1;
		np->default_port = option & 15;
	}

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii.full_duplex = full_duplex[card_idx];

	if (np->mii.full_duplex) {
		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
/* 89/6/13 add, (begin) */
//      if (np->PHYType==MarvellPHY)
		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], 9);
			data = (data & 0xfcff) | 0x0200;
			mdio_write(dev, np->phys[0], 9, data);
		}
/* 89/6/13 add, (end) */
		if (np->flags == HAS_MII_XCVR)
			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
		else
			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
		np->mii.force_media = 1;
	}

	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto err_out_free_tx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
	       dev->dev_addr, irq);

	return 0;

err_out_free_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
			  np->tx_ring_dma);
err_out_free_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
			  np->rx_ring_dma);
err_out_free_dev:
	free_netdev(dev);
err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
	return err;
}


static void fealnx_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
				  np->tx_ring_dma);
		dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
				  np->rx_ring_dma);
		unregister_netdev(dev);
		pci_iounmap(pdev, np->mem);
		free_netdev(dev);
		pci_release_regions(pdev);
	} else
		printk(KERN_ERR "fealnx: remove for unknown device\n");
}


static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
{
	ulong miir;
	int i;
	unsigned int mask, data;

	/* enable MII output */
	miir = (ulong) ioread32(miiport);
	miir &= 0xfffffff0;

	miir |= MASK_MIIR_MII_WRITE | MASK_MIIR_MII_MDO;

	/* send 32 1's preamble */
	for (i = 0; i < 32; i++) {
		/* low MDC; MDO is already high (miir) */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
	}

	/* calculate ST+OP+PHYAD+REGAD+TA */
	data = opcode | (phyad << 7) | (regad << 2);

	/* send it out */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC | MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;

		iowrite32(miir, miiport);
		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
		if (mask == 0x2 && opcode == OP_READ)
			miir &= ~MASK_MIIR_MII_WRITE;
	}
	return miir;
}


static int mdio_read(struct net_device *dev, int phyad, int regad)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask, data;

	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

	/* read data */
	mask = 0x8000;
	data = 0;
	while (mask) {
		/* low MDC */
		miir &= ~MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* read MDI */
		miir = ioread32(miiport);
		if (miir & MASK_MIIR_MII_MDI)
			data |= mask;

		/* high MDC, and wait */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);
		udelay(30);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);

	return data & 0xffff;
}


static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *miiport = np->mem + MANAGEMENT;
	ulong miir;
	unsigned int mask;

	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

	/* write data */
	mask = 0x8000;
	while (mask) {
		/* low MDC, prepare MDO */
		miir &= ~(MASK_MIIR_MII_MDC | MASK_MIIR_MII_MDO);
		if (mask & data)
			miir |= MASK_MIIR_MII_MDO;
		iowrite32(miir, miiport);

		/* high MDC */
		miir |= MASK_MIIR_MII_MDC;
		iowrite32(miir, miiport);

		/* next */
		mask >>= 1;
	}

	/* low MDC */
	miir &= ~MASK_MIIR_MII_MDC;
	iowrite32(miir, miiport);
}

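/*
 * Illustrative sketch (editor's addition, not driver code): how the probe
 * path in fealnx_init_one() uses the bit-banged accessors above to detect
 * and identify a PHY.  Register 1 is the MII status word, register 2 the
 * high word of the PHY ID.  Compiled out with #if 0.
 */
#if 0
	int mii_status = mdio_read(dev, phy, 1);

	if (mii_status != 0xffff && mii_status != 0x0000) {	/* PHY present */
		unsigned int id = mdio_read(dev, phy, 2);

		if (id == SeeqPHYID0)	/* 0x0016 */
			np->PHYType = SeeqPHY;
	}
#endif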

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	const int irq = np->pci_dev->irq;
	int rc, i;

	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */

	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (rc)
		return -EAGAIN;

	for (i = 0; i < 3; i++)
		iowrite16(((const unsigned short *)dev->dev_addr)[i],
				ioaddr + PAR0 + i*2);

	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword burst.
	   586: no burst limit.
	   Burst length 5:3
	   0 0 0   1
	   0 0 1   4
	   0 1 0   8
	   0 1 1   16
	   1 0 0   32
	   1 0 1   64
	   1 1 0   128
	   1 1 1   256
	   Wait the specified 50 PCI cycles after a reset before initializing
	   the Tx and Rx queues and the address filter list.
	   FIXME (Ueimor): optimistic for alpha + posted writes ? */

	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
#ifdef __BIG_ENDIAN
	np->bcrvalue |= 0x04;	/* big-endian */
#endif

#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
	if (boot_cpu_data.x86 <= 4)
		np->crvalue = 0xa00;
	else
#endif
		np->crvalue = 0xe00;	/* rx 128 burst length */


// 89/12/29 add,
// 90/1/16 modify,
//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
	if (np->pci_dev->device == 0x891) {
		np->bcrvalue |= 0x200;	/* set PROG bit */
		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
		np->imrvalue |= ETI;
	}
	iowrite32(np->bcrvalue, ioaddr + BCR);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	iowrite32(0, ioaddr + RXPDR);
// 89/9/1 modify,
//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
	np->mii.full_duplex = np->mii.force_media;
	getlinkstatus(dev);
	if (np->linkok)
		getlinktype(dev);
	__set_rx_mode(dev);

	netif_start_queue(dev);

	/* Clear and enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	if (debug)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = RUN_AT(3 * HZ);

	/* timer handler */
	add_timer(&np->timer);

	timer_setup(&np->reset_timer, reset_timer, 0);
	np->reset_timer_armed = 0;
	return rc;
}


static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
/* output  : none.                                                           */
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int i, DelayTime = 0x1000;

	np->linkok = 0;

	if (np->PHYType == MysonPHY) {
		for (i = 0; i < DelayTime; ++i) {
			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	} else {
		for (i = 0; i < DelayTime; ++i) {
			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
				np->linkok = 1;
				return;
			}
			udelay(100);
		}
	}
}


static void getlinktype(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
			np->duplexmode = 2;	/* full duplex */
		else
			np->duplexmode = 1;	/* half duplex */
		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
			np->line_speed = 1;	/* 10M */
		else
			np->line_speed = 2;	/* 100M */
	} else {
		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
			unsigned int data;

			data = mdio_read(dev, np->phys[0], MIIRegister18);
			if (data & SPD_DET_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_DET_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		} else if (np->PHYType == AhdocPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], DiagnosticReg);
			if (data & Speed_100)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
			if (data & DPLX_FULL)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
		}
/* 89/6/13 add, (begin) */
		else if (np->PHYType == MarvellPHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & Full_Duplex)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == Speed_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == Speed_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
/* 89/6/13 add, (end) */
/* 89/7/27 add, (begin) */
		else if (np->PHYType == Myson981) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], StatusRegister);

			if (data & SPEED100)
				np->line_speed = 2;
			else
				np->line_speed = 1;

			if (data & FULLMODE)
				np->duplexmode = 2;
			else
				np->duplexmode = 1;
		}
/* 89/7/27 add, (end) */
/* 89/12/29 add */
		else if (np->PHYType == LevelOnePHY) {
			unsigned int data;

			data = mdio_read(dev, np->phys[0], SpecificReg);
			if (data & LXT1000_Full)
				np->duplexmode = 2;	/* full duplex mode */
			else
				np->duplexmode = 1;	/* half duplex mode */
			data &= SpeedMask;
			if (data == LXT1000_1000M)
				np->line_speed = 3;	/* 1000M */
			else if (data == LXT1000_100M)
				np->line_speed = 2;	/* 100M */
			else
				np->line_speed = 1;	/* 10M */
		}
		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
		if (np->line_speed == 1)
			np->crvalue |= CR_W_PS10;
		else if (np->line_speed == 3)
			np->crvalue |= CR_W_PS1000;
		if (np->duplexmode == 2)
			np->crvalue |= CR_W_FD;
	}
}


/* Take lock before calling this */
static void allocate_rx_buffers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	/* allocate skb for rx buffers */
	while (np->really_rx_count != RX_RING_SIZE) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
		if (skb == NULL)
			break;	/* Better luck next round. */

		while (np->lack_rxbuf->skbuff)
			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;

		np->lack_rxbuf->skbuff = skb;
		np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
							skb->data,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
		np->lack_rxbuf->status = RXOWN;
		++np->really_rx_count;
	}
}


static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = np->mii.dev;
	void __iomem *ioaddr = np->mem;
	int old_crvalue = np->crvalue;
	unsigned int old_linkok = np->linkok;
	unsigned long flags;

	if (debug)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
		       ioread32(ioaddr + TCRRCR));

	spin_lock_irqsave(&np->lock, flags);

	if (np->flags == HAS_MII_XCVR) {
		getlinkstatus(dev);
		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
			getlinktype(dev);
			if (np->crvalue != old_crvalue) {
				stop_nic_rxtx(ioaddr, np->crvalue);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}
	}

	allocate_rx_buffers(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	np->timer.expires = RUN_AT(10 * HZ);
	add_timer(&np->timer);
}


/* Take lock before calling */
/* Reset chip and disable rx, tx and interrupts */
static void reset_and_disable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int delay = 51;

	/* Reset the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0, ioaddr + IMR);

	/* Reset the chip to erase previous misconfiguration. */
	iowrite32(0x00000001, ioaddr + BCR);

	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
	   We surely wait too long (address+data phase). Who cares? */
	while (--delay) {
		ioread32(ioaddr + BCR);
		rmb();
	}
}


/* Take lock before calling */
/* Restore chip after reset */
static void enable_rxtx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	reset_rx_descriptors(dev);

	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
		ioaddr + TXLBA);
	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		ioaddr + RXLBA);

	iowrite32(np->bcrvalue, ioaddr + BCR);

	iowrite32(0, ioaddr + RXPDR);
	__set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */

	/* Clear and enable interrupts by setting the interrupt mask. */
	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
	iowrite32(np->imrvalue, ioaddr + IMR);

	iowrite32(0, ioaddr + TXPDR);
}


static void reset_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, reset_timer);
	struct net_device *dev = np->mii.dev;
	unsigned long flags;

	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);

	spin_lock_irqsave(&np->lock, flags);
	np->crvalue = np->crvalue_sv;
	np->imrvalue = np->imrvalue_sv;

	reset_and_disable_rxtx(dev);
	/* works for me without this:
	reset_tx_descriptors(dev); */
	enable_rxtx(dev);
	netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */

	np->reset_timer_armed = 0;

	spin_unlock_irqrestore(&np->lock, flags);
}


static void fealnx_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	unsigned long flags;
	int i;

	printk(KERN_WARNING
	       "%s: Transmit timed out, status %8.8x, resetting...\n",
	       dev->name, ioread32(ioaddr + ISR));

	{
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %8.8x",
			       (unsigned int) np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}

	spin_lock_irqsave(&np->lock, flags);

	reset_and_disable_rxtx(dev);
	reset_tx_descriptors(dev);
	enable_rxtx(dev);

	spin_unlock_irqrestore(&np->lock, flags);

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev); /* or .._start_.. ?? */
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	/* initialize rx variables */
	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->cur_rx = &np->rx_ring[0];
	np->lack_rxbuf = np->rx_ring;
	np->really_rx_count = 0;

	/* initialize rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
		np->rx_ring[i].next_desc = np->rx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
		np->rx_ring[i].skbuff = NULL;
	}

	/* for the last rx descriptor */
	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

	/* allocate skb for rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

		if (skb == NULL) {
			np->lack_rxbuf = &np->rx_ring[i];
			break;
		}

		++np->really_rx_count;
		np->rx_ring[i].skbuff = skb;
		np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
						       skb->data,
						       np->rx_buf_sz,
						       DMA_FROM_DEVICE);
		np->rx_ring[i].status = RXOWN;
		np->rx_ring[i].control |= RXIC;
	}

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].status = 0;
		/* do we need np->tx_ring[i].control = XXX; ?? */
		np->tx_ring[i].next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
		np->tx_ring[i].skbuff = NULL;
	}

	/* for the last tx descriptor */
	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}

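/*
 * Illustrative sketch (editor's addition, not driver code): every descriptor
 * carries two "next" links - next_desc holds the bus (DMA) address the chip
 * follows, while next_desc_logical is the kernel virtual pointer the driver
 * follows.  Walking the whole ring wraps back to the first slot.  Compiled
 * out with #if 0.
 */
#if 0
	struct fealnx_desc *cur = np->rx_ring;
	int i;

	for (i = 0; i < RX_RING_SIZE; i++)
		cur = cur->next_desc_logical;
	/* after RX_RING_SIZE hops, cur == np->rx_ring again */
#endif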

static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->lock, flags);

	np->cur_tx_copy->skbuff = skb;

#define one_buffer
#define BPT 1022
#if defined(one_buffer)
	np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
						 skb->len, DMA_TO_DEVICE);
	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
	if (np->pci_dev->device == 0x891)
		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
	np->cur_tx_copy->status = TXOWN;
	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
	--np->free_tx_count;
#elif defined(two_buffer)
	if (skb->len > BPT) {
		struct fealnx_desc *next;

		/* for the first descriptor */
		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
							 skb->data, BPT,
							 DMA_TO_DEVICE);
		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */

		/* for the last descriptor */
		next = np->cur_tx_copy->next_desc_logical;
		next->skbuff = skb;
		next->control = TXIC | TXLD | CRCEnable | PADEnable;
		next->control |= (skb->len << PKTSShift);	/* pkt size */
		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		next->buffer = dma_map_single(&np->pci_dev->dev,
					      skb->data + BPT, skb->len - BPT,
					      DMA_TO_DEVICE);

		next->status = TXOWN;
		np->cur_tx_copy->status = TXOWN;

		np->cur_tx_copy = next->next_desc_logical;
		np->free_tx_count -= 2;
	} else {
		np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
							 skb->data, skb->len,
							 DMA_TO_DEVICE);
		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
// 89/12/29 add,
		if (np->pci_dev->device == 0x891)
			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
		np->cur_tx_copy->status = TXOWN;
		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
		--np->free_tx_count;
	}
#endif

	if (np->free_tx_count < 2)
		netif_stop_queue(dev);
	++np->really_tx_count;
	iowrite32(0, np->mem + TXPDR);

	spin_unlock_irqrestore(&np->lock, flags);
	return NETDEV_TX_OK;
}


/* Take lock before calling */
/* Chip probably hosed tx ring. Clean up. */
static void reset_tx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur;
	int i;

	/* initialize tx variables */
	np->cur_tx = &np->tx_ring[0];
	np->cur_tx_copy = &np->tx_ring[0];
	np->really_tx_count = 0;
	np->free_tx_count = TX_RING_SIZE;

	for (i = 0; i < TX_RING_SIZE; i++) {
		cur = &np->tx_ring[i];
		if (cur->skbuff) {
			dma_unmap_single(&np->pci_dev->dev, cur->buffer,
					 cur->skbuff->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(cur->skbuff);
			cur->skbuff = NULL;
		}
		cur->status = 0;
		cur->control = 0;	/* needed? */
		/* probably not needed. We do it for purely paranoid reasons */
		cur->next_desc = np->tx_ring_dma +
			(i + 1)*sizeof(struct fealnx_desc);
		cur->next_desc_logical = &np->tx_ring[i + 1];
	}
	/* for the last tx descriptor */
	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
}


/* Take lock and stop rx before calling this */
static void reset_rx_descriptors(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct fealnx_desc *cur = np->cur_rx;
	int i;

	allocate_rx_buffers(dev);

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (cur->skbuff)
			cur->status = RXOWN;
		cur = cur->next_desc_logical;
	}

	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
		np->mem + RXLBA);
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	long boguscnt = max_interrupt_work;
	unsigned int num_tx = 0;
	int handled = 0;

	spin_lock(&np->lock);

	iowrite32(0, ioaddr + IMR);

	do {
		u32 intr_status = ioread32(ioaddr + ISR);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status, ioaddr + ISR);

		if (debug)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
			       intr_status);

		if (!(intr_status & np->imrvalue))
			break;

		handled = 1;

// 90/1/16 delete,
//
//      if (intr_status & FBE)
//      {   /* fatal error */
//          stop_nic_tx(ioaddr, 0);
//          stop_nic_rx(ioaddr, 0);
//          break;
//      };

		if (intr_status & TUNF)
			iowrite32(0, ioaddr + TXPDR);

		if (intr_status & CNTOVF) {
			/* missed pkts */
			dev->stats.rx_missed_errors +=
				ioread32(ioaddr + TALLY) & 0x7fff;

			/* crc error */
			dev->stats.rx_crc_errors +=
			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
		}

		if (intr_status & (RI | RBU)) {
			if (intr_status & RI)
				netdev_rx(dev);
			else {
				stop_nic_rx(ioaddr, np->crvalue);
				reset_rx_descriptors(dev);
				iowrite32(np->crvalue, ioaddr + TCRRCR);
			}
		}

		while (np->really_tx_count) {
			long tx_status = np->cur_tx->status;
			long tx_control = np->cur_tx->control;

			if (!(tx_control & TXLD)) {	/* this pkt is split across two tx descriptors */
				struct fealnx_desc *next;

				next = np->cur_tx->next_desc_logical;
				tx_status = next->status;
				tx_control = next->control;
			}

			if (tx_status & TXOWN)
				break;

			if (!(np->crvalue & CR_W_ENH)) {
				if (tx_status & (CSL | LC | EC | UDF | HF)) {
					dev->stats.tx_errors++;
					if (tx_status & EC)
						dev->stats.tx_aborted_errors++;
					if (tx_status & CSL)
						dev->stats.tx_carrier_errors++;
					if (tx_status & LC)
						dev->stats.tx_window_errors++;
					if (tx_status & UDF)
						dev->stats.tx_fifo_errors++;
					if ((tx_status & HF) && np->mii.full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;

				} else {
					dev->stats.tx_bytes +=
					    ((tx_control & PKTSMask) >> PKTSShift);

					dev->stats.collisions +=
					    ((tx_status & NCRMask) >> NCRShift);
					dev->stats.tx_packets++;
				}
			} else {
				dev->stats.tx_bytes +=
				    ((tx_control & PKTSMask) >> PKTSShift);
				dev->stats.tx_packets++;
			}

			/* Free the original skb. */
			dma_unmap_single(&np->pci_dev->dev,
					 np->cur_tx->buffer,
					 np->cur_tx->skbuff->len,
					 DMA_TO_DEVICE);
			dev_consume_skb_irq(np->cur_tx->skbuff);
			np->cur_tx->skbuff = NULL;
			--np->really_tx_count;
			if (np->cur_tx->control & TXLD) {
				np->cur_tx = np->cur_tx->next_desc_logical;
				++np->free_tx_count;
			} else {
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->cur_tx = np->cur_tx->next_desc_logical;
				np->free_tx_count += 2;
			}
			num_tx++;
		}		/* end of while loop */

		if (num_tx && np->free_tx_count >= 2)
			netif_wake_queue(dev);

		/* read transmit status for enhanced mode only */
		if (np->crvalue & CR_W_ENH) {
			long data;

			data = ioread32(ioaddr + TSR);
			dev->stats.tx_errors += (data & 0xff000000) >> 24;
			dev->stats.tx_aborted_errors +=
				(data & 0xff000000) >> 24;
			dev->stats.tx_window_errors +=
				(data & 0x00ff0000) >> 16;
			dev->stats.collisions += (data & 0x0000ffff);
		}

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			if (!np->reset_timer_armed) {
				np->reset_timer_armed = 1;
				np->reset_timer.expires = RUN_AT(HZ/2);
				add_timer(&np->reset_timer);
				stop_nic_rxtx(ioaddr, 0);
				netif_stop_queue(dev);
				/* or netif_tx_disable(dev); ?? */
				/* Prevent other paths from enabling tx,rx,intrs */
				np->crvalue_sv = np->crvalue;
				np->imrvalue_sv = np->imrvalue;
				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
				np->imrvalue = 0;
			}

			break;
		}
	} while (1);

	/* read the tally counters */
	/* missed pkts */
	dev->stats.rx_missed_errors += ioread32(ioaddr + TALLY) & 0x7fff;

	/* crc error */
	dev->stats.rx_crc_errors +=
		(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;

	if (debug)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + ISR));

	iowrite32(np->imrvalue, ioaddr + IMR);

	spin_unlock(&np->lock);

	return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
		s32 rx_status = np->cur_rx->status;

		if (np->really_rx_count == 0)
			break;

		if (debug)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
		    (rx_status & ErrorSummary)) {
			if (rx_status & ErrorSummary) {	/* there was a fatal error */
				if (debug)
					printk(KERN_DEBUG
					       "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, rx_status);

				dev->stats.rx_errors++;	/* end of a packet. */
				if (rx_status & (LONGPKT | RUNTPKT))
					dev->stats.rx_length_errors++;
				if (rx_status & RXER)
					dev->stats.rx_frame_errors++;
				if (rx_status & CRC)
					dev->stats.rx_crc_errors++;
			} else {
				int need_to_reset = 0;
				int desno = 0;

				if (rx_status & RXFSD) {	/* this pkt is too long, it spans more than one rx buffer */
					struct fealnx_desc *cur;

					/* check whether this packet was received completely */
					cur = np->cur_rx;
					while (desno <= np->really_rx_count) {
						++desno;
						if ((!(cur->status & RXOWN)) &&
						    (cur->status & RXLSD))
							break;
						/* goto next rx descriptor */
						cur = cur->next_desc_logical;
					}
					if (desno > np->really_rx_count)
						need_to_reset = 1;
				} else	/* no RXLSD found, something is wrong */
					need_to_reset = 1;

				if (need_to_reset == 0) {
					int i;

					dev->stats.rx_length_errors++;

					/* free all rx descriptors related to this long pkt */
					for (i = 0; i < desno; ++i) {
						if (!np->cur_rx->skbuff) {
							printk(KERN_DEBUG
								"%s: I'm scared\n", dev->name);
							break;
						}
						np->cur_rx->status = RXOWN;
						np->cur_rx = np->cur_rx->next_desc_logical;
					}
					continue;
				} else {        /* rx error, need to reset this chip */
					stop_nic_rx(ioaddr, np->crvalue);
					reset_rx_descriptors(dev);
					iowrite32(np->crvalue, ioaddr + TCRRCR);
				}
				break;	/* exit the while loop */
			}
		} else {	/* this received pkt is ok */

			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;

#ifndef final_version
			if (debug)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, rx_status);
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
							np->cur_rx->buffer,
							np->rx_buf_sz,
							DMA_FROM_DEVICE);
				/* Call copy + cksum if available. */

#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb,
					np->cur_rx->skbuff->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb, np->cur_rx->skbuff->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&np->pci_dev->dev,
							   np->cur_rx->buffer,
							   np->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&np->pci_dev->dev,
						 np->cur_rx->buffer,
						 np->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb_put(skb = np->cur_rx->skbuff, pkt_len);
				np->cur_rx->skbuff = NULL;
				--np->really_rx_count;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}

		np->cur_rx = np->cur_rx->next_desc_logical;
	}			/* end of while loop */

	/* allocate skb for rx buffers */
	allocate_rx_buffers(dev);

	return 0;
}


static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;

	/* The chip only needs to report frames that were silently dropped. */
	if (netif_running(dev)) {
		dev->stats.rx_missed_errors +=
			ioread32(ioaddr + TALLY) & 0x7fff;
		dev->stats.rx_crc_errors +=
			(ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
	}

	return &dev->stats;
}


/* for dev->set_multicast_list */
static void set_rx_mode(struct net_device *dev)
{
	spinlock_t *lp = &((struct netdev_private *)netdev_priv(dev))->lock;
	unsigned long flags;

	spin_lock_irqsave(lp, flags);
	__set_rx_mode(dev);
	spin_unlock_irqrestore(lp, flags);
}
1769
1770
1771/* Take lock before calling */
1772static void __set_rx_mode(struct net_device *dev)
1773{
1774	struct netdev_private *np = netdev_priv(dev);
1775	void __iomem *ioaddr = np->mem;
1776	u32 mc_filter[2];	/* Multicast hash filter */
1777	u32 rx_mode;
1778
1779	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1780		memset(mc_filter, 0xff, sizeof(mc_filter));
1781		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1782	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1783		   (dev->flags & IFF_ALLMULTI)) {
1784		/* Too many to match, or accept all multicasts. */
1785		memset(mc_filter, 0xff, sizeof(mc_filter));
1786		rx_mode = CR_W_AB | CR_W_AM;
1787	} else {
1788		struct netdev_hw_addr *ha;
1789
1790		memset(mc_filter, 0, sizeof(mc_filter));
1791		netdev_for_each_mc_addr(ha, dev) {
1792			unsigned int bit;
1793			bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1794			mc_filter[bit >> 5] |= (1 << bit);
1795		}
1796		rx_mode = CR_W_AB | CR_W_AM;
1797	}
1798
1799	stop_nic_rxtx(ioaddr, np->crvalue);
1800
1801	iowrite32(mc_filter[0], ioaddr + MAR0);
1802	iowrite32(mc_filter[1], ioaddr + MAR1);
1803	np->crvalue &= ~CR_W_RXMODEMASK;
1804	np->crvalue |= rx_mode;
1805	iowrite32(np->crvalue, ioaddr + TCRRCR);
1806}
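
/*
 * The imperfect multicast filter above hashes each address with the standard
 * Ethernet CRC-32: the top six bits of the CRC (flipped by the ^0x3F) pick
 * one of 64 hash bits split across MAR0 (bits 0-31) and MAR1 (bits 32-63).
 * Several addresses can map to the same bit, so the hardware filter only
 * pre-screens; exact matching is left to the upper layers.
 */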

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
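
/*
 * These ops back the usual ethtool(8) requests: .get_drvinfo answers
 * "ethtool -i", .nway_reset "ethtool -r", .get_link feeds the link-detected
 * line of plain "ethtool <dev>", the msglevel pair maps to the msglvl
 * option, and the link_ksettings pair services speed/duplex queries and
 * "ethtool -s" changes.
 */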

static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);

	return rc;
}
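
/*
 * generic_mii_ioctl() implements SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG in
 * terms of the mdio_read()/mdio_write() hooks registered in np->mii, so no
 * per-command handling is needed here.
 */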


static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->mem;
	int i;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32(0x0000, ioaddr + IMR);

	/* Stop the chip's Tx and Rx processes. */
	stop_nic_rxtx(ioaddr, 0);

	del_timer_sync(&np->timer);
	del_timer_sync(&np->reset_timer);

	free_irq(np->pci_dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = np->rx_ring[i].skbuff;

		np->rx_ring[i].status = 0;
		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->rx_ring[i].buffer, np->rx_buf_sz,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			np->rx_ring[i].skbuff = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = np->tx_ring[i].skbuff;

		if (skb) {
			dma_unmap_single(&np->pci_dev->dev,
					 np->tx_ring[i].buffer, skb->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(skb);
			np->tx_ring[i].skbuff = NULL;
		}
	}

	return 0;
}
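
/*
 * The teardown order above matters: the interrupt mask and DMA engines are
 * shut off before the timers and IRQ are released, so no handler can touch
 * the rings while their skbs are being unmapped and freed.
 */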

static const struct pci_device_id fealnx_pci_tbl[] = {
	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{} /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
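
/*
 * Vendor ID 0x1516 is Myson Technology; the trailing driver_data field of
 * each entry indexes the skel_netdrv_tbl[] chip-capability table, so devices
 * 0x0800, 0x0803 and 0x0891 select entries 0, 1 and 2 respectively.
 */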


static struct pci_driver fealnx_driver = {
	.name		= "fealnx",
	.id_table	= fealnx_pci_tbl,
	.probe		= fealnx_init_one,
	.remove		= fealnx_remove_one,
};

module_pci_driver(fealnx_driver);
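
/*
 * module_pci_driver() expands to the module_init()/module_exit() boilerplate
 * that registers fealnx_driver with the PCI core on load and unregisters it
 * on removal.
 */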
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
v4.17
   1/*
   2	Written 1998-2000 by Donald Becker.
   3
   4	This software may be used and distributed according to the terms of
   5	the GNU General Public License (GPL), incorporated herein by reference.
   6	Drivers based on or derived from this code fall under the GPL and must
   7	retain the authorship, copyright and license notice.  This file is not
   8	a complete program and may only be used when the entire operating
   9	system is licensed under the GPL.
  10
  11	The author may be reached as becker@scyld.com, or C/O
  12	Scyld Computing Corporation
  13	410 Severn Ave., Suite 210
  14	Annapolis MD 21403
  15
  16	Support information and updates available at
  17	http://www.scyld.com/network/pci-skeleton.html
  18
  19	Linux kernel updates:
  20
  21	Version 2.51, Nov 17, 2001 (jgarzik):
  22	- Add ethtool support
  23	- Replace some MII-related magic numbers with constants
  24
  25*/
  26
  27#define DRV_NAME	"fealnx"
  28#define DRV_VERSION	"2.52"
  29#define DRV_RELDATE	"Sep-11-2006"
  30
  31static int debug;		/* 1-> print debug message */
  32static int max_interrupt_work = 20;
  33
  34/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
  35static int multicast_filter_limit = 32;
  36
  37/* Set the copy breakpoint for the copy-only-tiny-frames scheme. */
  38/* Setting to > 1518 effectively disables this feature.          */
  39static int rx_copybreak;
  40
  41/* Used to pass the media type, etc.                            */
  42/* Both 'options[]' and 'full_duplex[]' should exist for driver */
  43/* interoperability.                                            */
  44/* The media type is usually passed in 'options[]'.             */
  45#define MAX_UNITS 8		/* More are supported, limit only on options */
  46static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
  47static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
  48
  49/* Operational parameters that are set at compile time.                 */
  50/* Keep the ring sizes a power of two for compile efficiency.           */
  51/* The compiler will convert <unsigned>'%'<2^N> into a bit mask.        */
  52/* Making the Tx ring too large decreases the effectiveness of channel  */
  53/* bonding and packet priority.                                         */
  54/* There are no ill effects from too-large receive rings.               */
  55// 88-12-9 modify,
  56// #define TX_RING_SIZE    16
  57// #define RX_RING_SIZE    32
  58#define TX_RING_SIZE    6
  59#define RX_RING_SIZE    12
  60#define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct fealnx_desc)
  61#define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct fealnx_desc)
  62
  63/* Operational parameters that usually are not changed. */
  64/* Time in jiffies before concluding the transmitter is hung. */
  65#define TX_TIMEOUT      (2*HZ)
  66
  67#define PKT_BUF_SZ      1536	/* Size of each temporary Rx buffer. */
  68
  69
  70/* Include files, designed to support most kernel versions 2.0.0 and later. */
  71#include <linux/module.h>
  72#include <linux/kernel.h>
  73#include <linux/string.h>
  74#include <linux/timer.h>
  75#include <linux/errno.h>
  76#include <linux/ioport.h>
  77#include <linux/interrupt.h>
  78#include <linux/pci.h>
  79#include <linux/netdevice.h>
  80#include <linux/etherdevice.h>
  81#include <linux/skbuff.h>
  82#include <linux/init.h>
  83#include <linux/mii.h>
  84#include <linux/ethtool.h>
  85#include <linux/crc32.h>
  86#include <linux/delay.h>
  87#include <linux/bitops.h>
  88
  89#include <asm/processor.h>	/* Processor type for cache alignment. */
  90#include <asm/io.h>
  91#include <linux/uaccess.h>
  92#include <asm/byteorder.h>
  93
  94/* These identify the driver base version and may not be removed. */
  95static const char version[] =
  96	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
  97
  98
  99/* This driver was written to use PCI memory space, however some x86 systems
 100   work only with I/O space accesses. */
 101#ifndef __alpha__
 102#define USE_IO_OPS
 103#endif
 104
 105/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
 106/* This is only in the support-all-kernels source code. */
 107
 108#define RUN_AT(x) (jiffies + (x))
 109
 110MODULE_AUTHOR("Myson or whoever");
 111MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
 112MODULE_LICENSE("GPL");
 113module_param(max_interrupt_work, int, 0);
 114module_param(debug, int, 0);
 115module_param(rx_copybreak, int, 0);
 116module_param(multicast_filter_limit, int, 0);
 117module_param_array(options, int, NULL, 0);
 118module_param_array(full_duplex, int, NULL, 0);
 119MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
 120MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
 121MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
 122MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
 123MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
 124MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
 125
 126enum {
 127	MIN_REGION_SIZE		= 136,
 128};
 129
 130/* A chip capabilities table, matching the entries in pci_tbl[] above. */
 131enum chip_capability_flags {
 132	HAS_MII_XCVR,
 133	HAS_CHIP_XCVR,
 134};
 135
 136/* 89/6/13 add, */
 137/* for different PHY */
 138enum phy_type_flags {
 139	MysonPHY = 1,
 140	AhdocPHY = 2,
 141	SeeqPHY = 3,
 142	MarvellPHY = 4,
 143	Myson981 = 5,
 144	LevelOnePHY = 6,
 145	OtherPHY = 10,
 146};
 147
 148struct chip_info {
 149	char *chip_name;
 150	int flags;
 151};
 152
 153static const struct chip_info skel_netdrv_tbl[] = {
 154 	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
 155	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
 156	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
 157};
 158
 159/* Offsets to the Command and Status Registers. */
 160enum fealnx_offsets {
 161	PAR0 = 0x0,		/* physical address 0-3 */
 162	PAR1 = 0x04,		/* physical address 4-5 */
 163	MAR0 = 0x08,		/* multicast address 0-3 */
 164	MAR1 = 0x0C,		/* multicast address 4-7 */
 165	FAR0 = 0x10,		/* flow-control address 0-3 */
 166	FAR1 = 0x14,		/* flow-control address 4-5 */
 167	TCRRCR = 0x18,		/* receive & transmit configuration */
 168	BCR = 0x1C,		/* bus command */
 169	TXPDR = 0x20,		/* transmit polling demand */
 170	RXPDR = 0x24,		/* receive polling demand */
 171	RXCWP = 0x28,		/* receive current word pointer */
 172	TXLBA = 0x2C,		/* transmit list base address */
 173	RXLBA = 0x30,		/* receive list base address */
 174	ISR = 0x34,		/* interrupt status */
 175	IMR = 0x38,		/* interrupt mask */
 176	FTH = 0x3C,		/* flow control high/low threshold */
 177	MANAGEMENT = 0x40,	/* bootrom/eeprom and mii management */
 178	TALLY = 0x44,		/* tally counters for crc and mpa */
 179	TSR = 0x48,		/* tally counter for transmit status */
 180	BMCRSR = 0x4c,		/* basic mode control and status */
 181	PHYIDENTIFIER = 0x50,	/* phy identifier */
 182	ANARANLPAR = 0x54,	/* auto-negotiation advertisement and link
 183				   partner ability */
 184	ANEROCR = 0x58,		/* auto-negotiation expansion and pci conf. */
 185	BPREMRPSR = 0x5c,	/* bypass & receive error mask and phy status */
 186};
 187
 188/* Bits in the interrupt status/enable registers. */
 189/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
 190enum intr_status_bits {
 191	RFCON = 0x00020000,	/* receive flow control xon packet */
 192	RFCOFF = 0x00010000,	/* receive flow control xoff packet */
 193	LSCStatus = 0x00008000,	/* link status change */
 194	ANCStatus = 0x00004000,	/* autonegotiation completed */
 195	FBE = 0x00002000,	/* fatal bus error */
 196	FBEMask = 0x00001800,	/* mask bit12-11 */
 197	ParityErr = 0x00000000,	/* parity error */
 198	TargetErr = 0x00001000,	/* target abort */
 199	MasterErr = 0x00000800,	/* master error */
 200	TUNF = 0x00000400,	/* transmit underflow */
 201	ROVF = 0x00000200,	/* receive overflow */
 202	ETI = 0x00000100,	/* transmit early int */
 203	ERI = 0x00000080,	/* receive early int */
 204	CNTOVF = 0x00000040,	/* counter overflow */
 205	RBU = 0x00000020,	/* receive buffer unavailable */
 206	TBU = 0x00000010,	/* transmit buffer unavilable */
 207	TI = 0x00000008,	/* transmit interrupt */
 208	RI = 0x00000004,	/* receive interrupt */
 209	RxErr = 0x00000002,	/* receive error */
 210};
 211
 212/* Bits in the NetworkConfig register, W for writing, R for reading */
 213/* FIXME: some names are invented by me. Marked with (name?) */
 214/* If you have docs and know bit names, please fix 'em */
 215enum rx_mode_bits {
 216	CR_W_ENH	= 0x02000000,	/* enhanced mode (name?) */
 217	CR_W_FD		= 0x00100000,	/* full duplex */
 218	CR_W_PS10	= 0x00080000,	/* 10 mbit */
 219	CR_W_TXEN	= 0x00040000,	/* tx enable (name?) */
 220	CR_W_PS1000	= 0x00010000,	/* 1000 mbit */
 221     /* CR_W_RXBURSTMASK= 0x00000e00, Im unsure about this */
 222	CR_W_RXMODEMASK	= 0x000000e0,
 223	CR_W_PROM	= 0x00000080,	/* promiscuous mode */
 224	CR_W_AB		= 0x00000040,	/* accept broadcast */
 225	CR_W_AM		= 0x00000020,	/* accept mutlicast */
 226	CR_W_ARP	= 0x00000008,	/* receive runt pkt */
 227	CR_W_ALP	= 0x00000004,	/* receive long pkt */
 228	CR_W_SEP	= 0x00000002,	/* receive error pkt */
 229	CR_W_RXEN	= 0x00000001,	/* rx enable (unicast?) (name?) */
 230
 231	CR_R_TXSTOP	= 0x04000000,	/* tx stopped (name?) */
 232	CR_R_FD		= 0x00100000,	/* full duplex detected */
 233	CR_R_PS10	= 0x00080000,	/* 10 mbit detected */
 234	CR_R_RXSTOP	= 0x00008000,	/* rx stopped (name?) */
 235};
 236
 237/* The Tulip Rx and Tx buffer descriptors. */
 238struct fealnx_desc {
 239	s32 status;
 240	s32 control;
 241	u32 buffer;
 242	u32 next_desc;
 243	struct fealnx_desc *next_desc_logical;
 244	struct sk_buff *skbuff;
 245	u32 reserved1;
 246	u32 reserved2;
 247};
 248
 249/* Bits in network_desc.status */
 250enum rx_desc_status_bits {
 251	RXOWN = 0x80000000,	/* own bit */
 252	FLNGMASK = 0x0fff0000,	/* frame length */
 253	FLNGShift = 16,
 254	MARSTATUS = 0x00004000,	/* multicast address received */
 255	BARSTATUS = 0x00002000,	/* broadcast address received */
 256	PHYSTATUS = 0x00001000,	/* physical address received */
 257	RXFSD = 0x00000800,	/* first descriptor */
 258	RXLSD = 0x00000400,	/* last descriptor */
 259	ErrorSummary = 0x80,	/* error summary */
 260	RUNTPKT = 0x40,		/* runt packet received */
 261	LONGPKT = 0x20,		/* long packet received */
 262	FAE = 0x10,		/* frame align error */
 263	CRC = 0x08,		/* crc error */
 264	RXER = 0x04,		/* receive error */
 265};
 266
 267enum rx_desc_control_bits {
 268	RXIC = 0x00800000,	/* interrupt control */
 269	RBSShift = 0,
 270};
 271
 272enum tx_desc_status_bits {
 273	TXOWN = 0x80000000,	/* own bit */
 274	JABTO = 0x00004000,	/* jabber timeout */
 275	CSL = 0x00002000,	/* carrier sense lost */
 276	LC = 0x00001000,	/* late collision */
 277	EC = 0x00000800,	/* excessive collision */
 278	UDF = 0x00000400,	/* fifo underflow */
 279	DFR = 0x00000200,	/* deferred */
 280	HF = 0x00000100,	/* heartbeat fail */
 281	NCRMask = 0x000000ff,	/* collision retry count */
 282	NCRShift = 0,
 283};
 284
 285enum tx_desc_control_bits {
 286	TXIC = 0x80000000,	/* interrupt control */
 287	ETIControl = 0x40000000,	/* early transmit interrupt */
 288	TXLD = 0x20000000,	/* last descriptor */
 289	TXFD = 0x10000000,	/* first descriptor */
 290	CRCEnable = 0x08000000,	/* crc control */
 291	PADEnable = 0x04000000,	/* padding control */
 292	RetryTxLC = 0x02000000,	/* retry late collision */
 293	PKTSMask = 0x3ff800,	/* packet size bit21-11 */
 294	PKTSShift = 11,
 295	TBSMask = 0x000007ff,	/* transmit buffer bit 10-0 */
 296	TBSShift = 0,
 297};
 298
 299/* BootROM/EEPROM/MII Management Register */
 300#define MASK_MIIR_MII_READ       0x00000000
 301#define MASK_MIIR_MII_WRITE      0x00000008
 302#define MASK_MIIR_MII_MDO        0x00000004
 303#define MASK_MIIR_MII_MDI        0x00000002
 304#define MASK_MIIR_MII_MDC        0x00000001
 305
 306/* ST+OP+PHYAD+REGAD+TA */
 307#define OP_READ             0x6000	/* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
 308#define OP_WRITE            0x5002	/* ST:01+OP:01+PHYAD+REGAD+TA:10 */
 309
 310/* ------------------------------------------------------------------------- */
 311/*      Constants for Myson PHY                                              */
 312/* ------------------------------------------------------------------------- */
 313#define MysonPHYID      0xd0000302
 314/* 89-7-27 add, (begin) */
 315#define MysonPHYID0     0x0302
 316#define StatusRegister  18
 317#define SPEED100        0x0400	// bit10
 318#define FULLMODE        0x0800	// bit11
 319/* 89-7-27 add, (end) */
 320
 321/* ------------------------------------------------------------------------- */
 322/*      Constants for Seeq 80225 PHY                                         */
 323/* ------------------------------------------------------------------------- */
 324#define SeeqPHYID0      0x0016
 325
 326#define MIIRegister18   18
 327#define SPD_DET_100     0x80
 328#define DPLX_DET_FULL   0x40
 329
 330/* ------------------------------------------------------------------------- */
 331/*      Constants for Ahdoc 101 PHY                                          */
 332/* ------------------------------------------------------------------------- */
 333#define AhdocPHYID0     0x0022
 334
 335#define DiagnosticReg   18
 336#define DPLX_FULL       0x0800
 337#define Speed_100       0x0400
 338
 339/* 89/6/13 add, */
 340/* -------------------------------------------------------------------------- */
 341/*      Constants                                                             */
 342/* -------------------------------------------------------------------------- */
 343#define MarvellPHYID0           0x0141
 344#define LevelOnePHYID0		0x0013
 345
 346#define MII1000BaseTControlReg  9
 347#define MII1000BaseTStatusReg   10
 348#define SpecificReg		17
 349
 350/* for 1000BaseT Control Register */
 351#define PHYAbletoPerform1000FullDuplex  0x0200
 352#define PHYAbletoPerform1000HalfDuplex  0x0100
 353#define PHY1000AbilityMask              0x300
 354
 355// for phy specific status register, marvell phy.
 356#define SpeedMask       0x0c000
 357#define Speed_1000M     0x08000
 358#define Speed_100M      0x4000
 359#define Speed_10M       0
 360#define Full_Duplex     0x2000
 361
 362// 89/12/29 add, for phy specific status register, levelone phy, (begin)
 363#define LXT1000_100M    0x08000
 364#define LXT1000_1000M   0x0c000
 365#define LXT1000_Full    0x200
 366// 89/12/29 add, for phy specific status register, levelone phy, (end)
 367
 368/* for 3-in-1 case, BMCRSR register */
 369#define LinkIsUp2	0x00040000
 370
 371/* for PHY */
 372#define LinkIsUp        0x0004
 373
 374
 375struct netdev_private {
 376	/* Descriptor rings first for alignment. */
 377	struct fealnx_desc *rx_ring;
 378	struct fealnx_desc *tx_ring;
 379
 380	dma_addr_t rx_ring_dma;
 381	dma_addr_t tx_ring_dma;
 382
 383	spinlock_t lock;
 384
 385	/* Media monitoring timer. */
 386	struct timer_list timer;
 387
 388	/* Reset timer */
 389	struct timer_list reset_timer;
 390	int reset_timer_armed;
 391	unsigned long crvalue_sv;
 392	unsigned long imrvalue_sv;
 393
 394	/* Frequently used values: keep some adjacent for cache effect. */
 395	int flags;
 396	struct pci_dev *pci_dev;
 397	unsigned long crvalue;
 398	unsigned long bcrvalue;
 399	unsigned long imrvalue;
 400	struct fealnx_desc *cur_rx;
 401	struct fealnx_desc *lack_rxbuf;
 402	int really_rx_count;
 403	struct fealnx_desc *cur_tx;
 404	struct fealnx_desc *cur_tx_copy;
 405	int really_tx_count;
 406	int free_tx_count;
 407	unsigned int rx_buf_sz;	/* Based on MTU+slack. */
 408
 409	/* These values are keep track of the transceiver/media in use. */
 410	unsigned int linkok;
 411	unsigned int line_speed;
 412	unsigned int duplexmode;
 413	unsigned int default_port:4;	/* Last dev->if_port value. */
 414	unsigned int PHYType;
 415
 416	/* MII transceiver section. */
 417	int mii_cnt;		/* MII device addresses. */
 418	unsigned char phys[2];	/* MII device addresses. */
 419	struct mii_if_info mii;
 420	void __iomem *mem;
 421};
 422
 423
 424static int mdio_read(struct net_device *dev, int phy_id, int location);
 425static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 426static int netdev_open(struct net_device *dev);
 427static void getlinktype(struct net_device *dev);
 428static void getlinkstatus(struct net_device *dev);
 429static void netdev_timer(struct timer_list *t);
 430static void reset_timer(struct timer_list *t);
 431static void fealnx_tx_timeout(struct net_device *dev);
 432static void init_ring(struct net_device *dev);
 433static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 434static irqreturn_t intr_handler(int irq, void *dev_instance);
 435static int netdev_rx(struct net_device *dev);
 436static void set_rx_mode(struct net_device *dev);
 437static void __set_rx_mode(struct net_device *dev);
 438static struct net_device_stats *get_stats(struct net_device *dev);
 439static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 440static const struct ethtool_ops netdev_ethtool_ops;
 441static int netdev_close(struct net_device *dev);
 442static void reset_rx_descriptors(struct net_device *dev);
 443static void reset_tx_descriptors(struct net_device *dev);
 444
 445static void stop_nic_rx(void __iomem *ioaddr, long crvalue)
 446{
 447	int delay = 0x1000;
 448	iowrite32(crvalue & ~(CR_W_RXEN), ioaddr + TCRRCR);
 449	while (--delay) {
 450		if ( (ioread32(ioaddr + TCRRCR) & CR_R_RXSTOP) == CR_R_RXSTOP)
 451			break;
 452	}
 453}
 454
 455
 456static void stop_nic_rxtx(void __iomem *ioaddr, long crvalue)
 457{
 458	int delay = 0x1000;
 459	iowrite32(crvalue & ~(CR_W_RXEN+CR_W_TXEN), ioaddr + TCRRCR);
 460	while (--delay) {
 461		if ( (ioread32(ioaddr + TCRRCR) & (CR_R_RXSTOP+CR_R_TXSTOP))
 462					    == (CR_R_RXSTOP+CR_R_TXSTOP) )
 463			break;
 464	}
 465}
 466
 467static const struct net_device_ops netdev_ops = {
 468	.ndo_open		= netdev_open,
 469	.ndo_stop		= netdev_close,
 470	.ndo_start_xmit		= start_tx,
 471	.ndo_get_stats 		= get_stats,
 472	.ndo_set_rx_mode	= set_rx_mode,
 473	.ndo_do_ioctl		= mii_ioctl,
 474	.ndo_tx_timeout		= fealnx_tx_timeout,
 475	.ndo_set_mac_address 	= eth_mac_addr,
 476	.ndo_validate_addr	= eth_validate_addr,
 477};
 478
 479static int fealnx_init_one(struct pci_dev *pdev,
 480			   const struct pci_device_id *ent)
 481{
 482	struct netdev_private *np;
 483	int i, option, err, irq;
 484	static int card_idx = -1;
 485	char boardname[12];
 486	void __iomem *ioaddr;
 487	unsigned long len;
 488	unsigned int chip_id = ent->driver_data;
 489	struct net_device *dev;
 490	void *ring_space;
 491	dma_addr_t ring_dma;
 
 492#ifdef USE_IO_OPS
 493	int bar = 0;
 494#else
 495	int bar = 1;
 496#endif
 497
 498/* when built into the kernel, we only print version if device is found */
 499#ifndef MODULE
 500	static int printed_version;
 501	if (!printed_version++)
 502		printk(version);
 503#endif
 504
 505	card_idx++;
 506	sprintf(boardname, "fealnx%d", card_idx);
 507
 508	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
 509
 510	i = pci_enable_device(pdev);
 511	if (i) return i;
 512	pci_set_master(pdev);
 513
 514	len = pci_resource_len(pdev, bar);
 515	if (len < MIN_REGION_SIZE) {
 516		dev_err(&pdev->dev,
 517			   "region size %ld too small, aborting\n", len);
 518		return -ENODEV;
 519	}
 520
 521	i = pci_request_regions(pdev, boardname);
 522	if (i)
 523		return i;
 524
 525	irq = pdev->irq;
 526
 527	ioaddr = pci_iomap(pdev, bar, len);
 528	if (!ioaddr) {
 529		err = -ENOMEM;
 530		goto err_out_res;
 531	}
 532
 533	dev = alloc_etherdev(sizeof(struct netdev_private));
 534	if (!dev) {
 535		err = -ENOMEM;
 536		goto err_out_unmap;
 537	}
 538	SET_NETDEV_DEV(dev, &pdev->dev);
 539
 540	/* read ethernet id */
 541	for (i = 0; i < 6; ++i)
 542		dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
 
 543
 544	/* Reset the chip to erase previous misconfiguration. */
 545	iowrite32(0x00000001, ioaddr + BCR);
 546
 547	/* Make certain the descriptor lists are aligned. */
 548	np = netdev_priv(dev);
 549	np->mem = ioaddr;
 550	spin_lock_init(&np->lock);
 551	np->pci_dev = pdev;
 552	np->flags = skel_netdrv_tbl[chip_id].flags;
 553	pci_set_drvdata(pdev, dev);
 554	np->mii.dev = dev;
 555	np->mii.mdio_read = mdio_read;
 556	np->mii.mdio_write = mdio_write;
 557	np->mii.phy_id_mask = 0x1f;
 558	np->mii.reg_num_mask = 0x1f;
 559
 560	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
 
 561	if (!ring_space) {
 562		err = -ENOMEM;
 563		goto err_out_free_dev;
 564	}
 565	np->rx_ring = ring_space;
 566	np->rx_ring_dma = ring_dma;
 567
 568	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
 
 569	if (!ring_space) {
 570		err = -ENOMEM;
 571		goto err_out_free_rx;
 572	}
 573	np->tx_ring = ring_space;
 574	np->tx_ring_dma = ring_dma;
 575
 576	/* find the connected MII xcvrs */
 577	if (np->flags == HAS_MII_XCVR) {
 578		int phy, phy_idx = 0;
 579
 580		for (phy = 1; phy < 32 && phy_idx < ARRAY_SIZE(np->phys);
 581			       phy++) {
 582			int mii_status = mdio_read(dev, phy, 1);
 583
 584			if (mii_status != 0xffff && mii_status != 0x0000) {
 585				np->phys[phy_idx++] = phy;
 586				dev_info(&pdev->dev,
 587				       "MII PHY found at address %d, status "
 588				       "0x%4.4x.\n", phy, mii_status);
 589				/* get phy type */
 590				{
 591					unsigned int data;
 592
 593					data = mdio_read(dev, np->phys[0], 2);
 594					if (data == SeeqPHYID0)
 595						np->PHYType = SeeqPHY;
 596					else if (data == AhdocPHYID0)
 597						np->PHYType = AhdocPHY;
 598					else if (data == MarvellPHYID0)
 599						np->PHYType = MarvellPHY;
 600					else if (data == MysonPHYID0)
 601						np->PHYType = Myson981;
 602					else if (data == LevelOnePHYID0)
 603						np->PHYType = LevelOnePHY;
 604					else
 605						np->PHYType = OtherPHY;
 606				}
 607			}
 608		}
 609
 610		np->mii_cnt = phy_idx;
 611		if (phy_idx == 0)
 612			dev_warn(&pdev->dev,
 613				"MII PHY not found -- this device may "
 614			       "not operate correctly.\n");
 615	} else {
 616		np->phys[0] = 32;
 617/* 89/6/23 add, (begin) */
 618		/* get phy type */
 619		if (ioread32(ioaddr + PHYIDENTIFIER) == MysonPHYID)
 620			np->PHYType = MysonPHY;
 621		else
 622			np->PHYType = OtherPHY;
 623	}
 624	np->mii.phy_id = np->phys[0];
 625
 626	if (dev->mem_start)
 627		option = dev->mem_start;
 628
 629	/* The lower four bits are the media type. */
 630	if (option > 0) {
 631		if (option & 0x200)
 632			np->mii.full_duplex = 1;
 633		np->default_port = option & 15;
 634	}
 635
 636	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
 637		np->mii.full_duplex = full_duplex[card_idx];
 638
 639	if (np->mii.full_duplex) {
 640		dev_info(&pdev->dev, "Media type forced to Full Duplex.\n");
 641/* 89/6/13 add, (begin) */
 642//      if (np->PHYType==MarvellPHY)
 643		if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
 644			unsigned int data;
 645
 646			data = mdio_read(dev, np->phys[0], 9);
 647			data = (data & 0xfcff) | 0x0200;
 648			mdio_write(dev, np->phys[0], 9, data);
 649		}
 650/* 89/6/13 add, (end) */
 651		if (np->flags == HAS_MII_XCVR)
 652			mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
 653		else
 654			iowrite32(ADVERTISE_FULL, ioaddr + ANARANLPAR);
 655		np->mii.force_media = 1;
 656	}
 657
 658	dev->netdev_ops = &netdev_ops;
 659	dev->ethtool_ops = &netdev_ethtool_ops;
 660	dev->watchdog_timeo = TX_TIMEOUT;
 661
 662	err = register_netdev(dev);
 663	if (err)
 664		goto err_out_free_tx;
 665
 666	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 667	       dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
 668	       dev->dev_addr, irq);
 669
 670	return 0;
 671
 672err_out_free_tx:
 673	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 
 674err_out_free_rx:
 675	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
 
 676err_out_free_dev:
 677	free_netdev(dev);
 678err_out_unmap:
 679	pci_iounmap(pdev, ioaddr);
 680err_out_res:
 681	pci_release_regions(pdev);
 682	return err;
 683}
 684
 685
 686static void fealnx_remove_one(struct pci_dev *pdev)
 687{
 688	struct net_device *dev = pci_get_drvdata(pdev);
 689
 690	if (dev) {
 691		struct netdev_private *np = netdev_priv(dev);
 692
 693		pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
 694			np->tx_ring_dma);
 695		pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
 696			np->rx_ring_dma);
 697		unregister_netdev(dev);
 698		pci_iounmap(pdev, np->mem);
 699		free_netdev(dev);
 700		pci_release_regions(pdev);
 701	} else
 702		printk(KERN_ERR "fealnx: remove for unknown device\n");
 703}
 704
 705
 706static ulong m80x_send_cmd_to_phy(void __iomem *miiport, int opcode, int phyad, int regad)
 707{
 708	ulong miir;
 709	int i;
 710	unsigned int mask, data;
 711
 712	/* enable MII output */
 713	miir = (ulong) ioread32(miiport);
 714	miir &= 0xfffffff0;
 715
 716	miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;
 717
 718	/* send 32 1's preamble */
 719	for (i = 0; i < 32; i++) {
 720		/* low MDC; MDO is already high (miir) */
 721		miir &= ~MASK_MIIR_MII_MDC;
 722		iowrite32(miir, miiport);
 723
 724		/* high MDC */
 725		miir |= MASK_MIIR_MII_MDC;
 726		iowrite32(miir, miiport);
 727	}
 728
 729	/* calculate ST+OP+PHYAD+REGAD+TA */
 730	data = opcode | (phyad << 7) | (regad << 2);
 731
 732	/* sent out */
 733	mask = 0x8000;
 734	while (mask) {
 735		/* low MDC, prepare MDO */
 736		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
 737		if (mask & data)
 738			miir |= MASK_MIIR_MII_MDO;
 739
 740		iowrite32(miir, miiport);
 741		/* high MDC */
 742		miir |= MASK_MIIR_MII_MDC;
 743		iowrite32(miir, miiport);
 744		udelay(30);
 745
 746		/* next */
 747		mask >>= 1;
 748		if (mask == 0x2 && opcode == OP_READ)
 749			miir &= ~MASK_MIIR_MII_WRITE;
 750	}
 751	return miir;
 752}
 753
 754
 755static int mdio_read(struct net_device *dev, int phyad, int regad)
 756{
 757	struct netdev_private *np = netdev_priv(dev);
 758	void __iomem *miiport = np->mem + MANAGEMENT;
 759	ulong miir;
 760	unsigned int mask, data;
 761
 762	miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);
 763
 764	/* read data */
 765	mask = 0x8000;
 766	data = 0;
 767	while (mask) {
 768		/* low MDC */
 769		miir &= ~MASK_MIIR_MII_MDC;
 770		iowrite32(miir, miiport);
 771
 772		/* read MDI */
 773		miir = ioread32(miiport);
 774		if (miir & MASK_MIIR_MII_MDI)
 775			data |= mask;
 776
 777		/* high MDC, and wait */
 778		miir |= MASK_MIIR_MII_MDC;
 779		iowrite32(miir, miiport);
 780		udelay(30);
 781
 782		/* next */
 783		mask >>= 1;
 784	}
 785
 786	/* low MDC */
 787	miir &= ~MASK_MIIR_MII_MDC;
 788	iowrite32(miir, miiport);
 789
 790	return data & 0xffff;
 791}
 792
 793
 794static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
 795{
 796	struct netdev_private *np = netdev_priv(dev);
 797	void __iomem *miiport = np->mem + MANAGEMENT;
 798	ulong miir;
 799	unsigned int mask;
 800
 801	miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);
 802
 803	/* write data */
 804	mask = 0x8000;
 805	while (mask) {
 806		/* low MDC, prepare MDO */
 807		miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
 808		if (mask & data)
 809			miir |= MASK_MIIR_MII_MDO;
 810		iowrite32(miir, miiport);
 811
 812		/* high MDC */
 813		miir |= MASK_MIIR_MII_MDC;
 814		iowrite32(miir, miiport);
 815
 816		/* next */
 817		mask >>= 1;
 818	}
 819
 820	/* low MDC */
 821	miir &= ~MASK_MIIR_MII_MDC;
 822	iowrite32(miir, miiport);
 823}
 824
 825
 826static int netdev_open(struct net_device *dev)
 827{
 828	struct netdev_private *np = netdev_priv(dev);
 829	void __iomem *ioaddr = np->mem;
 830	const int irq = np->pci_dev->irq;
 831	int rc, i;
 832
 833	iowrite32(0x00000001, ioaddr + BCR);	/* Reset */
 834
 835	rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
 836	if (rc)
 837		return -EAGAIN;
 838
 839	for (i = 0; i < 3; i++)
 840		iowrite16(((unsigned short*)dev->dev_addr)[i],
 841				ioaddr + PAR0 + i*2);
 842
 843	init_ring(dev);
 844
 845	iowrite32(np->rx_ring_dma, ioaddr + RXLBA);
 846	iowrite32(np->tx_ring_dma, ioaddr + TXLBA);
 847
 848	/* Initialize other registers. */
 849	/* Configure the PCI bus bursts and FIFO thresholds.
 850	   486: Set 8 longword burst.
 851	   586: no burst limit.
 852	   Burst length 5:3
 853	   0 0 0   1
 854	   0 0 1   4
 855	   0 1 0   8
 856	   0 1 1   16
 857	   1 0 0   32
 858	   1 0 1   64
 859	   1 1 0   128
 860	   1 1 1   256
 861	   Wait the specified 50 PCI cycles after a reset by initializing
 862	   Tx and Rx queues and the address filter list.
 863	   FIXME (Ueimor): optimistic for alpha + posted writes ? */
 864
 865	np->bcrvalue = 0x10;	/* little-endian, 8 burst length */
 866#ifdef __BIG_ENDIAN
 867	np->bcrvalue |= 0x04;	/* big-endian */
 868#endif
 869
 870#if defined(__i386__) && !defined(MODULE)
 871	if (boot_cpu_data.x86 <= 4)
 872		np->crvalue = 0xa00;
 873	else
 874#endif
 875		np->crvalue = 0xe00;	/* rx 128 burst length */
 876
 877
 878// 89/12/29 add,
 879// 90/1/16 modify,
 880//   np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
 881	np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
 882	if (np->pci_dev->device == 0x891) {
 883		np->bcrvalue |= 0x200;	/* set PROG bit */
 884		np->crvalue |= CR_W_ENH;	/* set enhanced bit */
 885		np->imrvalue |= ETI;
 886	}
 887	iowrite32(np->bcrvalue, ioaddr + BCR);
 888
 889	if (dev->if_port == 0)
 890		dev->if_port = np->default_port;
 891
 892	iowrite32(0, ioaddr + RXPDR);
 893// 89/9/1 modify,
 894//   np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
 895	np->crvalue |= 0x00e40001;	/* tx store and forward, tx/rx enable */
 896	np->mii.full_duplex = np->mii.force_media;
 897	getlinkstatus(dev);
 898	if (np->linkok)
 899		getlinktype(dev);
 900	__set_rx_mode(dev);
 901
 902	netif_start_queue(dev);
 903
 904	/* Clear and Enable interrupts by setting the interrupt mask. */
 905	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
 906	iowrite32(np->imrvalue, ioaddr + IMR);
 907
 908	if (debug)
 909		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
 910
 911	/* Set the timer to check for link beat. */
 912	timer_setup(&np->timer, netdev_timer, 0);
 913	np->timer.expires = RUN_AT(3 * HZ);
 914
 915	/* timer handler */
 916	add_timer(&np->timer);
 917
 918	timer_setup(&np->reset_timer, reset_timer, 0);
 919	np->reset_timer_armed = 0;
 920	return rc;
 921}
 922
 923
 924static void getlinkstatus(struct net_device *dev)
 925/* function: Routine will read MII Status Register to get link status.       */
 926/* input   : dev... pointer to the adapter block.                            */
 927/* output  : none.                                                           */
 928{
 929	struct netdev_private *np = netdev_priv(dev);
 930	unsigned int i, DelayTime = 0x1000;
 931
 932	np->linkok = 0;
 933
 934	if (np->PHYType == MysonPHY) {
 935		for (i = 0; i < DelayTime; ++i) {
 936			if (ioread32(np->mem + BMCRSR) & LinkIsUp2) {
 937				np->linkok = 1;
 938				return;
 939			}
 940			udelay(100);
 941		}
 942	} else {
 943		for (i = 0; i < DelayTime; ++i) {
 944			if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
 945				np->linkok = 1;
 946				return;
 947			}
 948			udelay(100);
 949		}
 950	}
 951}
 952
 953
 954static void getlinktype(struct net_device *dev)
 955{
 956	struct netdev_private *np = netdev_priv(dev);
 957
 958	if (np->PHYType == MysonPHY) {	/* 3-in-1 case */
 959		if (ioread32(np->mem + TCRRCR) & CR_R_FD)
 960			np->duplexmode = 2;	/* full duplex */
 961		else
 962			np->duplexmode = 1;	/* half duplex */
 963		if (ioread32(np->mem + TCRRCR) & CR_R_PS10)
 964			np->line_speed = 1;	/* 10M */
 965		else
 966			np->line_speed = 2;	/* 100M */
 967	} else {
 968		if (np->PHYType == SeeqPHY) {	/* this PHY is SEEQ 80225 */
 969			unsigned int data;
 970
 971			data = mdio_read(dev, np->phys[0], MIIRegister18);
 972			if (data & SPD_DET_100)
 973				np->line_speed = 2;	/* 100M */
 974			else
 975				np->line_speed = 1;	/* 10M */
 976			if (data & DPLX_DET_FULL)
 977				np->duplexmode = 2;	/* full duplex mode */
 978			else
 979				np->duplexmode = 1;	/* half duplex mode */
 980		} else if (np->PHYType == AhdocPHY) {
 981			unsigned int data;
 982
 983			data = mdio_read(dev, np->phys[0], DiagnosticReg);
 984			if (data & Speed_100)
 985				np->line_speed = 2;	/* 100M */
 986			else
 987				np->line_speed = 1;	/* 10M */
 988			if (data & DPLX_FULL)
 989				np->duplexmode = 2;	/* full duplex mode */
 990			else
 991				np->duplexmode = 1;	/* half duplex mode */
 992		}
 993/* 89/6/13 add, (begin) */
 994		else if (np->PHYType == MarvellPHY) {
 995			unsigned int data;
 996
 997			data = mdio_read(dev, np->phys[0], SpecificReg);
 998			if (data & Full_Duplex)
 999				np->duplexmode = 2;	/* full duplex mode */
1000			else
1001				np->duplexmode = 1;	/* half duplex mode */
1002			data &= SpeedMask;
1003			if (data == Speed_1000M)
1004				np->line_speed = 3;	/* 1000M */
1005			else if (data == Speed_100M)
1006				np->line_speed = 2;	/* 100M */
1007			else
1008				np->line_speed = 1;	/* 10M */
1009		}
1010/* 89/6/13 add, (end) */
1011/* 89/7/27 add, (begin) */
1012		else if (np->PHYType == Myson981) {
1013			unsigned int data;
1014
1015			data = mdio_read(dev, np->phys[0], StatusRegister);
1016
1017			if (data & SPEED100)
1018				np->line_speed = 2;
1019			else
1020				np->line_speed = 1;
1021
1022			if (data & FULLMODE)
1023				np->duplexmode = 2;
1024			else
1025				np->duplexmode = 1;
1026		}
1027/* 89/7/27 add, (end) */
1028/* 89/12/29 add */
1029		else if (np->PHYType == LevelOnePHY) {
1030			unsigned int data;
1031
1032			data = mdio_read(dev, np->phys[0], SpecificReg);
1033			if (data & LXT1000_Full)
1034				np->duplexmode = 2;	/* full duplex mode */
1035			else
1036				np->duplexmode = 1;	/* half duplex mode */
1037			data &= SpeedMask;
1038			if (data == LXT1000_1000M)
1039				np->line_speed = 3;	/* 1000M */
1040			else if (data == LXT1000_100M)
1041				np->line_speed = 2;	/* 100M */
1042			else
1043				np->line_speed = 1;	/* 10M */
1044		}
1045		np->crvalue &= (~CR_W_PS10) & (~CR_W_FD) & (~CR_W_PS1000);
1046		if (np->line_speed == 1)
1047			np->crvalue |= CR_W_PS10;
1048		else if (np->line_speed == 3)
1049			np->crvalue |= CR_W_PS1000;
1050		if (np->duplexmode == 2)
1051			np->crvalue |= CR_W_FD;
1052	}
1053}
1054
1055
1056/* Take lock before calling this */
1057static void allocate_rx_buffers(struct net_device *dev)
1058{
1059	struct netdev_private *np = netdev_priv(dev);
1060
1061	/*  allocate skb for rx buffers */
1062	while (np->really_rx_count != RX_RING_SIZE) {
1063		struct sk_buff *skb;
1064
1065		skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1066		if (skb == NULL)
1067			break;	/* Better luck next round. */
1068
1069		while (np->lack_rxbuf->skbuff)
1070			np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
1071
1072		np->lack_rxbuf->skbuff = skb;
1073		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
1074			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 
1075		np->lack_rxbuf->status = RXOWN;
1076		++np->really_rx_count;
1077	}
1078}
1079
1080
1081static void netdev_timer(struct timer_list *t)
1082{
1083	struct netdev_private *np = from_timer(np, t, timer);
1084	struct net_device *dev = np->mii.dev;
1085	void __iomem *ioaddr = np->mem;
1086	int old_crvalue = np->crvalue;
1087	unsigned int old_linkok = np->linkok;
1088	unsigned long flags;
1089
1090	if (debug)
1091		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
1092		       "config %8.8x.\n", dev->name, ioread32(ioaddr + ISR),
1093		       ioread32(ioaddr + TCRRCR));
1094
1095	spin_lock_irqsave(&np->lock, flags);
1096
1097	if (np->flags == HAS_MII_XCVR) {
1098		getlinkstatus(dev);
1099		if ((old_linkok == 0) && (np->linkok == 1)) {	/* we need to detect the media type again */
1100			getlinktype(dev);
1101			if (np->crvalue != old_crvalue) {
1102				stop_nic_rxtx(ioaddr, np->crvalue);
1103				iowrite32(np->crvalue, ioaddr + TCRRCR);
1104			}
1105		}
1106	}
1107
1108	allocate_rx_buffers(dev);
1109
1110	spin_unlock_irqrestore(&np->lock, flags);
1111
1112	np->timer.expires = RUN_AT(10 * HZ);
1113	add_timer(&np->timer);
1114}
1115
1116
1117/* Take lock before calling */
1118/* Reset chip and disable rx, tx and interrupts */
1119static void reset_and_disable_rxtx(struct net_device *dev)
1120{
1121	struct netdev_private *np = netdev_priv(dev);
1122	void __iomem *ioaddr = np->mem;
1123	int delay=51;
1124
1125	/* Reset the chip's Tx and Rx processes. */
1126	stop_nic_rxtx(ioaddr, 0);
1127
1128	/* Disable interrupts by clearing the interrupt mask. */
1129	iowrite32(0, ioaddr + IMR);
1130
1131	/* Reset the chip to erase previous misconfiguration. */
1132	iowrite32(0x00000001, ioaddr + BCR);
1133
1134	/* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
1135	   We surely wait too long (address+data phase). Who cares? */
1136	while (--delay) {
1137		ioread32(ioaddr + BCR);
1138		rmb();
1139	}
1140}
1141
1142
1143/* Take lock before calling */
1144/* Restore chip after reset */
1145static void enable_rxtx(struct net_device *dev)
1146{
1147	struct netdev_private *np = netdev_priv(dev);
1148	void __iomem *ioaddr = np->mem;
1149
1150	reset_rx_descriptors(dev);
1151
1152	iowrite32(np->tx_ring_dma + ((char*)np->cur_tx - (char*)np->tx_ring),
1153		ioaddr + TXLBA);
1154	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1155		ioaddr + RXLBA);
1156
1157	iowrite32(np->bcrvalue, ioaddr + BCR);
1158
1159	iowrite32(0, ioaddr + RXPDR);
1160	__set_rx_mode(dev); /* changes np->crvalue, writes it into TCRRCR */
1161
1162	/* Clear and Enable interrupts by setting the interrupt mask. */
1163	iowrite32(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
1164	iowrite32(np->imrvalue, ioaddr + IMR);
1165
1166	iowrite32(0, ioaddr + TXPDR);
1167}
1168
1169
1170static void reset_timer(struct timer_list *t)
1171{
1172	struct netdev_private *np = from_timer(np, t, reset_timer);
1173	struct net_device *dev = np->mii.dev;
1174	unsigned long flags;
1175
1176	printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name);
1177
1178	spin_lock_irqsave(&np->lock, flags);
1179	np->crvalue = np->crvalue_sv;
1180	np->imrvalue = np->imrvalue_sv;
1181
1182	reset_and_disable_rxtx(dev);
1183	/* works for me without this:
1184	reset_tx_descriptors(dev); */
1185	enable_rxtx(dev);
1186	netif_start_queue(dev); /* FIXME: or netif_wake_queue(dev); ? */
1187
1188	np->reset_timer_armed = 0;
1189
1190	spin_unlock_irqrestore(&np->lock, flags);
1191}
1192
1193
1194static void fealnx_tx_timeout(struct net_device *dev)
1195{
1196	struct netdev_private *np = netdev_priv(dev);
1197	void __iomem *ioaddr = np->mem;
1198	unsigned long flags;
1199	int i;
1200
1201	printk(KERN_WARNING
1202	       "%s: Transmit timed out, status %8.8x, resetting...\n",
1203	       dev->name, ioread32(ioaddr + ISR));
1204
1205	{
1206		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
1207		for (i = 0; i < RX_RING_SIZE; i++)
1208			printk(KERN_CONT " %8.8x",
1209			       (unsigned int) np->rx_ring[i].status);
1210		printk(KERN_CONT "\n");
1211		printk(KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
1212		for (i = 0; i < TX_RING_SIZE; i++)
1213			printk(KERN_CONT " %4.4x", np->tx_ring[i].status);
1214		printk(KERN_CONT "\n");
1215	}
1216
1217	spin_lock_irqsave(&np->lock, flags);
1218
1219	reset_and_disable_rxtx(dev);
1220	reset_tx_descriptors(dev);
1221	enable_rxtx(dev);
1222
1223	spin_unlock_irqrestore(&np->lock, flags);
1224
1225	netif_trans_update(dev); /* prevent tx timeout */
1226	dev->stats.tx_errors++;
1227	netif_wake_queue(dev); /* or .._start_.. ?? */
1228}
1229
1230
1231/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1232static void init_ring(struct net_device *dev)
1233{
1234	struct netdev_private *np = netdev_priv(dev);
1235	int i;
1236
1237	/* initialize rx variables */
1238	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1239	np->cur_rx = &np->rx_ring[0];
1240	np->lack_rxbuf = np->rx_ring;
1241	np->really_rx_count = 0;
1242
1243	/* initial rx descriptors. */
1244	for (i = 0; i < RX_RING_SIZE; i++) {
1245		np->rx_ring[i].status = 0;
1246		np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
1247		np->rx_ring[i].next_desc = np->rx_ring_dma +
1248			(i + 1)*sizeof(struct fealnx_desc);
1249		np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
1250		np->rx_ring[i].skbuff = NULL;
1251	}
1252
1253	/* for the last rx descriptor */
1254	np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
1255	np->rx_ring[i - 1].next_desc_logical = np->rx_ring;
1256
1257	/* allocate skb for rx buffers */
1258	for (i = 0; i < RX_RING_SIZE; i++) {
1259		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1260
1261		if (skb == NULL) {
1262			np->lack_rxbuf = &np->rx_ring[i];
1263			break;
1264		}
1265
1266		++np->really_rx_count;
1267		np->rx_ring[i].skbuff = skb;
1268		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
1269			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 
 
1270		np->rx_ring[i].status = RXOWN;
1271		np->rx_ring[i].control |= RXIC;
1272	}
1273
1274	/* initialize tx variables */
1275	np->cur_tx = &np->tx_ring[0];
1276	np->cur_tx_copy = &np->tx_ring[0];
1277	np->really_tx_count = 0;
1278	np->free_tx_count = TX_RING_SIZE;
1279
1280	for (i = 0; i < TX_RING_SIZE; i++) {
1281		np->tx_ring[i].status = 0;
1282		/* do we need np->tx_ring[i].control = XXX; ?? */
1283		np->tx_ring[i].next_desc = np->tx_ring_dma +
1284			(i + 1)*sizeof(struct fealnx_desc);
1285		np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
1286		np->tx_ring[i].skbuff = NULL;
1287	}
1288
1289	/* for the last tx descriptor */
1290	np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
1291	np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
1292}
1293
1294
1295static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1296{
1297	struct netdev_private *np = netdev_priv(dev);
1298	unsigned long flags;
1299
1300	spin_lock_irqsave(&np->lock, flags);
1301
1302	np->cur_tx_copy->skbuff = skb;
1303
1304#define one_buffer
1305#define BPT 1022
1306#if defined(one_buffer)
1307	np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1308		skb->len, PCI_DMA_TODEVICE);
1309	np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1310	np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
1311	np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
1312// 89/12/29 add,
1313	if (np->pci_dev->device == 0x891)
1314		np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1315	np->cur_tx_copy->status = TXOWN;
1316	np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1317	--np->free_tx_count;
1318#elif defined(two_buffer)
1319	if (skb->len > BPT) {
1320		struct fealnx_desc *next;
1321
1322		/* for the first descriptor */
1323		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1324			BPT, PCI_DMA_TODEVICE);
 
1325		np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
1326		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
1327		np->cur_tx_copy->control |= (BPT << TBSShift);	/* buffer size */
1328
1329		/* for the last descriptor */
1330		next = np->cur_tx_copy->next_desc_logical;
1331		next->skbuff = skb;
1332		next->control = TXIC | TXLD | CRCEnable | PADEnable;
1333		next->control |= (skb->len << PKTSShift);	/* pkt size */
1334		next->control |= ((skb->len - BPT) << TBSShift);	/* buf size */
1335// 89/12/29 add,
1336		if (np->pci_dev->device == 0x891)
1337			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1338		next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT,
1339                                skb->len - BPT, PCI_DMA_TODEVICE);
 
1340
1341		next->status = TXOWN;
1342		np->cur_tx_copy->status = TXOWN;
1343
1344		np->cur_tx_copy = next->next_desc_logical;
1345		np->free_tx_count -= 2;
1346	} else {
1347		np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
1348			skb->len, PCI_DMA_TODEVICE);
 
1349		np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
1350		np->cur_tx_copy->control |= (skb->len << PKTSShift);	/* pkt size */
1351		np->cur_tx_copy->control |= (skb->len << TBSShift);	/* buffer size */
1352// 89/12/29 add,
1353		if (np->pci_dev->device == 0x891)
1354			np->cur_tx_copy->control |= ETIControl | RetryTxLC;
1355		np->cur_tx_copy->status = TXOWN;
1356		np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
1357		--np->free_tx_count;
1358	}
1359#endif
1360
1361	if (np->free_tx_count < 2)
1362		netif_stop_queue(dev);
1363	++np->really_tx_count;
1364	iowrite32(0, np->mem + TXPDR);
1365
1366	spin_unlock_irqrestore(&np->lock, flags);
1367	return NETDEV_TX_OK;
1368}
1369
1370
1371/* Take lock before calling */
1372/* Chip probably hosed tx ring. Clean up. */
1373static void reset_tx_descriptors(struct net_device *dev)
1374{
1375	struct netdev_private *np = netdev_priv(dev);
1376	struct fealnx_desc *cur;
1377	int i;
1378
1379	/* initialize tx variables */
1380	np->cur_tx = &np->tx_ring[0];
1381	np->cur_tx_copy = &np->tx_ring[0];
1382	np->really_tx_count = 0;
1383	np->free_tx_count = TX_RING_SIZE;
1384
1385	for (i = 0; i < TX_RING_SIZE; i++) {
1386		cur = &np->tx_ring[i];
1387		if (cur->skbuff) {
1388			pci_unmap_single(np->pci_dev, cur->buffer,
1389				cur->skbuff->len, PCI_DMA_TODEVICE);
1390			dev_kfree_skb_any(cur->skbuff);
1391			cur->skbuff = NULL;
1392		}
1393		cur->status = 0;
1394		cur->control = 0;	/* needed? */
1395		/* probably not needed. We do it for purely paranoid reasons */
1396		cur->next_desc = np->tx_ring_dma +
1397			(i + 1)*sizeof(struct fealnx_desc);
1398		cur->next_desc_logical = &np->tx_ring[i + 1];
1399	}
1400	/* for the last tx descriptor */
1401	np->tx_ring[TX_RING_SIZE - 1].next_desc = np->tx_ring_dma;
1402	np->tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->tx_ring[0];
1403}
1404
1405
1406/* Take lock and stop rx before calling this */
1407static void reset_rx_descriptors(struct net_device *dev)
1408{
1409	struct netdev_private *np = netdev_priv(dev);
1410	struct fealnx_desc *cur = np->cur_rx;
1411	int i;
1412
1413	allocate_rx_buffers(dev);
1414
1415	for (i = 0; i < RX_RING_SIZE; i++) {
1416		if (cur->skbuff)
1417			cur->status = RXOWN;
1418		cur = cur->next_desc_logical;
1419	}
1420
1421	iowrite32(np->rx_ring_dma + ((char*)np->cur_rx - (char*)np->rx_ring),
1422		np->mem + RXLBA);
1423}
1424
1425
1426/* The interrupt handler does all of the Rx thread work and cleans up
1427   after the Tx thread. */
1428static irqreturn_t intr_handler(int irq, void *dev_instance)
1429{
1430	struct net_device *dev = (struct net_device *) dev_instance;
1431	struct netdev_private *np = netdev_priv(dev);
1432	void __iomem *ioaddr = np->mem;
1433	long boguscnt = max_interrupt_work;
1434	unsigned int num_tx = 0;
1435	int handled = 0;
1436
1437	spin_lock(&np->lock);
1438
1439	iowrite32(0, ioaddr + IMR);
1440
1441	do {
1442		u32 intr_status = ioread32(ioaddr + ISR);
1443
1444		/* Acknowledge all of the current interrupt sources ASAP. */
1445		iowrite32(intr_status, ioaddr + ISR);
1446
1447		if (debug)
1448			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
1449			       intr_status);
1450
1451		if (!(intr_status & np->imrvalue))
1452			break;
1453
1454		handled = 1;
1455
1456// 90/1/16 delete,
1457//
1458//      if (intr_status & FBE)
1459//      {   /* fatal error */
1460//          stop_nic_tx(ioaddr, 0);
1461//          stop_nic_rx(ioaddr, 0);
1462//          break;
1463//      };
1464
1465		if (intr_status & TUNF)
1466			iowrite32(0, ioaddr + TXPDR);
1467
1468		if (intr_status & CNTOVF) {
1469			/* missed pkts */
1470			dev->stats.rx_missed_errors +=
1471				ioread32(ioaddr + TALLY) & 0x7fff;
1472
1473			/* crc error */
1474			dev->stats.rx_crc_errors +=
1475			    (ioread32(ioaddr + TALLY) & 0x7fff0000) >> 16;
1476		}
1477
1478		if (intr_status & (RI | RBU)) {
1479			if (intr_status & RI)
1480				netdev_rx(dev);
1481			else {
1482				stop_nic_rx(ioaddr, np->crvalue);
1483				reset_rx_descriptors(dev);
1484				iowrite32(np->crvalue, ioaddr + TCRRCR);
1485			}
1486		}
1487
1488		while (np->really_tx_count) {
1489			long tx_status = np->cur_tx->status;
1490			long tx_control = np->cur_tx->control;
1491
1492			if (!(tx_control & TXLD)) {	/* this pkt is combined by two tx descriptors */
1493				struct fealnx_desc *next;
1494
1495				next = np->cur_tx->next_desc_logical;
1496				tx_status = next->status;
1497				tx_control = next->control;
1498			}
1499
1500			if (tx_status & TXOWN)
1501				break;
1502
1503			if (!(np->crvalue & CR_W_ENH)) {
1504				if (tx_status & (CSL | LC | EC | UDF | HF)) {
1505					dev->stats.tx_errors++;
1506					if (tx_status & EC)
1507						dev->stats.tx_aborted_errors++;
1508					if (tx_status & CSL)
1509						dev->stats.tx_carrier_errors++;
1510					if (tx_status & LC)
1511						dev->stats.tx_window_errors++;
1512					if (tx_status & UDF)
1513						dev->stats.tx_fifo_errors++;
1514					if ((tx_status & HF) && np->mii.full_duplex == 0)
1515						dev->stats.tx_heartbeat_errors++;
1516
1517				} else {
1518					dev->stats.tx_bytes +=
1519					    ((tx_control & PKTSMask) >> PKTSShift);
1520
1521					dev->stats.collisions +=
1522					    ((tx_status & NCRMask) >> NCRShift);
1523					dev->stats.tx_packets++;
1524				}
1525			} else {
1526				dev->stats.tx_bytes +=
1527				    ((tx_control & PKTSMask) >> PKTSShift);
1528				dev->stats.tx_packets++;
1529			}
1530
1531			/* Free the original skb. */
1532			pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
1533				np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
1534			dev_kfree_skb_irq(np->cur_tx->skbuff);
 
 
1535			np->cur_tx->skbuff = NULL;
1536			--np->really_tx_count;
1537			if (np->cur_tx->control & TXLD) {
1538				np->cur_tx = np->cur_tx->next_desc_logical;
1539				++np->free_tx_count;
1540			} else {
1541				np->cur_tx = np->cur_tx->next_desc_logical;
1542				np->cur_tx = np->cur_tx->next_desc_logical;
1543				np->free_tx_count += 2;
1544			}
1545			num_tx++;
1546		}		/* end of for loop */
1547
1548		if (num_tx && np->free_tx_count >= 2)
1549			netif_wake_queue(dev);
1550
1551		/* read transmit status for enhanced mode only */
1552		if (np->crvalue & CR_W_ENH) {
1553			long data;
1554
1555			data = ioread32(ioaddr + TSR);
1556			dev->stats.tx_errors += (data & 0xff000000) >> 24;
1557			dev->stats.tx_aborted_errors +=
1558				(data & 0xff000000) >> 24;
1559			dev->stats.tx_window_errors +=
1560				(data & 0x00ff0000) >> 16;
1561			dev->stats.collisions += (data & 0x0000ffff);
1562		}
1563
1564		if (--boguscnt < 0) {
1565			printk(KERN_WARNING "%s: Too much work at interrupt, "
1566			       "status=0x%4.4x.\n", dev->name, intr_status);
1567			if (!np->reset_timer_armed) {
1568				np->reset_timer_armed = 1;
1569				np->reset_timer.expires = RUN_AT(HZ/2);
1570				add_timer(&np->reset_timer);
1571				stop_nic_rxtx(ioaddr, 0);
1572				netif_stop_queue(dev);
1573				/* or netif_tx_disable(dev); ?? */
1574				/* Prevent other paths from enabling tx,rx,intrs */
1575				np->crvalue_sv = np->crvalue;
1576				np->imrvalue_sv = np->imrvalue;
1577				np->crvalue &= ~(CR_W_TXEN | CR_W_RXEN); /* or simply = 0? */
1578				np->imrvalue = 0;
1579			}
1580
1581			break;
1582		}
1583	} while (1);
1584
1585	/* read the tally counters; TALLY is clear-on-read, so sample it once */
1586	{
1587		u32 tally = ioread32(ioaddr + TALLY);

1589		dev->stats.rx_missed_errors += tally & 0x7fff;	/* missed pkts */
1590		dev->stats.rx_crc_errors += (tally & 0x7fff0000) >> 16; /* crc errors */
1591	}
1592
1593	if (debug)
1594		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1595		       dev->name, ioread32(ioaddr + ISR));
1596
1597	iowrite32(np->imrvalue, ioaddr + IMR);
1598
1599	spin_unlock(&np->lock);
1600
1601	return IRQ_RETVAL(handled);
1602}
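/*
 * The IRQ_RETVAL(handled) return above pairs with a shared-line
 * registration in the device-open path.  A minimal sketch of that
 * registration, mirroring the free_irq(np->pci_dev->irq, dev) call in
 * netdev_close() below (the actual open routine lives earlier in this
 * file):
 *
 *	int err = request_irq(np->pci_dev->irq, intr_handler, IRQF_SHARED,
 *			      dev->name, dev);
 *	if (err)
 *		return err;
 */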
1603
1604
1605/* This routine is logically part of the interrupt handler, but separated
1606   for clarity and better register allocation. */
1607static int netdev_rx(struct net_device *dev)
1608{
1609	struct netdev_private *np = netdev_priv(dev);
1610	void __iomem *ioaddr = np->mem;
1611
1612	/* Hand up each frame whose descriptor the chip has returned (RXOWN clear). */
1613	while (!(np->cur_rx->status & RXOWN) && np->cur_rx->skbuff) {
1614		s32 rx_status = np->cur_rx->status;
1615
1616		if (np->really_rx_count == 0)
1617			break;
1618
1619		if (debug)
1620			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);
1621
1622		if ((!((rx_status & RXFSD) && (rx_status & RXLSD))) ||
1623		    (rx_status & ErrorSummary)) {
1624			if (rx_status & ErrorSummary) {	/* there was a fatal error */
1625				if (debug)
1626					printk(KERN_DEBUG
1627					       "%s: Receive error, Rx status %8.8x.\n",
1628					       dev->name, rx_status);
1629
1630				dev->stats.rx_errors++;	/* end of a packet. */
1631				if (rx_status & (LONGPKT | RUNTPKT))
1632					dev->stats.rx_length_errors++;
1633				if (rx_status & RXER)
1634					dev->stats.rx_frame_errors++;
1635				if (rx_status & CRC)
1636					dev->stats.rx_crc_errors++;
1637			} else {
1638				int need_to_reset = 0;
1639				int desno = 0;
1640
1641				if (rx_status & RXFSD) {	/* this pkt is too long; it spans more than one rx buffer */
1642					struct fealnx_desc *cur;
1643
1644					/* check whether this packet has been received completely */
1645					cur = np->cur_rx;
1646					while (desno <= np->really_rx_count) {
1647						++desno;
1648						if ((!(cur->status & RXOWN)) &&
1649						    (cur->status & RXLSD))
1650							break;
1651						/* goto next rx descriptor */
1652						cur = cur->next_desc_logical;
1653					}
1654					if (desno > np->really_rx_count)
1655						need_to_reset = 1;
1656				} else	/* no RXLSD found: the descriptor chain is inconsistent */
1657					need_to_reset = 1;
1658
1659				if (need_to_reset == 0) {
1660					int i;
1661
1662					dev->stats.rx_length_errors++;
1663
1664					/* free all rx descriptors related to this long pkt */
1665					for (i = 0; i < desno; ++i) {
1666						if (!np->cur_rx->skbuff) {
1667							printk(KERN_DEBUG
1668								"%s: rx descriptor missing its skbuff\n", dev->name);
1669							break;
1670						}
1671						np->cur_rx->status = RXOWN;
1672						np->cur_rx = np->cur_rx->next_desc_logical;
1673					}
1674					continue;
1675				} else {        /* rx error, need to reset this chip */
1676					stop_nic_rx(ioaddr, np->crvalue);
1677					reset_rx_descriptors(dev);
1678					iowrite32(np->crvalue, ioaddr + TCRRCR);
1679				}
1680				break;	/* exit the while loop */
1681			}
1682		} else {	/* this received pkt is ok */
1683
1684			struct sk_buff *skb;
1685			/* Omit the four octet CRC from the length. */
1686			short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
1687
1688#ifndef final_version
1689			if (debug)
1690				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1691				       " status %x.\n", pkt_len, rx_status);
1692#endif
1693
1694			/* Check if the packet is long enough to accept without copying
1695			   to a minimally-sized skbuff. */
1696			if (pkt_len < rx_copybreak &&
1697			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1698				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1699				pci_dma_sync_single_for_cpu(np->pci_dev,
1700							    np->cur_rx->buffer,
1701							    np->rx_buf_sz,
1702							    PCI_DMA_FROMDEVICE);
1703				/* Call copy + cksum if available. */
1704
1705#if ! defined(__alpha__)
1706				skb_copy_to_linear_data(skb,
1707					np->cur_rx->skbuff->data, pkt_len);
1708				skb_put(skb, pkt_len);
1709#else
1710				skb_put_data(skb, np->cur_rx->skbuff->data,
1711					     pkt_len);
1712#endif
1713				pci_dma_sync_single_for_device(np->pci_dev,
1714							       np->cur_rx->buffer,
1715							       np->rx_buf_sz,
1716							       PCI_DMA_FROMDEVICE);
1717			} else {
1718				pci_unmap_single(np->pci_dev,
1719						 np->cur_rx->buffer,
1720						 np->rx_buf_sz,
1721						 PCI_DMA_FROMDEVICE);
1722				skb_put(skb = np->cur_rx->skbuff, pkt_len);
1723				np->cur_rx->skbuff = NULL;
1724				--np->really_rx_count;
1725			}
1726			skb->protocol = eth_type_trans(skb, dev);
1727			netif_rx(skb);
1728			dev->stats.rx_packets++;
1729			dev->stats.rx_bytes += pkt_len;
1730		}
1731
1732		np->cur_rx = np->cur_rx->next_desc_logical;
1733	}			/* end of while loop */
1734
1735	/* refill the rx ring with freshly allocated skbs */
1736	allocate_rx_buffers(dev);
1737
1738	return 0;
1739}
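/*
 * Design note on the copybreak scheme in netdev_rx() above: a frame
 * shorter than rx_copybreak is copied into a fresh, minimally sized skb,
 * so the large PKT_BUF_SZ buffer stays DMA-mapped and is recycled at
 * once; hence only the pci_dma_sync_single_for_cpu()/_for_device() pair
 * around the copy.  A larger frame transfers buffer ownership to the
 * stack instead, which is why that path must pci_unmap_single() and
 * clear np->cur_rx->skbuff so allocate_rx_buffers() refills the slot.
 */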
1740
1741
1742static struct net_device_stats *get_stats(struct net_device *dev)
1743{
1744	struct netdev_private *np = netdev_priv(dev);
1745	void __iomem *ioaddr = np->mem;
1746
1747	/* The chip only needs to report frames it silently dropped. */
1748	if (netif_running(dev)) {
1749		u32 tally = ioread32(ioaddr + TALLY);	/* clear-on-read */

1751		dev->stats.rx_missed_errors += tally & 0x7fff;
1752		dev->stats.rx_crc_errors += (tally & 0x7fff0000) >> 16;
1753	}
1754
1755	return &dev->stats;
1756}
1757
1758
1759	/* for dev->netdev_ops->ndo_set_rx_mode */
1760static void set_rx_mode(struct net_device *dev)
1761{
1762	struct netdev_private *np = netdev_priv(dev);
1763	unsigned long flags;
1764	spin_lock_irqsave(&np->lock, flags);
1765	__set_rx_mode(dev);
1766	spin_unlock_irqrestore(&np->lock, flags);
1767}
1768
1769
1770/* Take lock before calling */
1771static void __set_rx_mode(struct net_device *dev)
1772{
1773	struct netdev_private *np = netdev_priv(dev);
1774	void __iomem *ioaddr = np->mem;
1775	u32 mc_filter[2];	/* Multicast hash filter */
1776	u32 rx_mode;
1777
1778	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1779		memset(mc_filter, 0xff, sizeof(mc_filter));
1780		rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1781	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1782		   (dev->flags & IFF_ALLMULTI)) {
1783		/* Too many to match, or accept all multicasts. */
1784		memset(mc_filter, 0xff, sizeof(mc_filter));
1785		rx_mode = CR_W_AB | CR_W_AM;
1786	} else {
1787		struct netdev_hw_addr *ha;
1788
1789		memset(mc_filter, 0, sizeof(mc_filter));
1790		netdev_for_each_mc_addr(ha, dev) {
1791			unsigned int bit;
1792			bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1793			mc_filter[bit >> 5] |= (1 << (bit & 31));
1794		}
1795		rx_mode = CR_W_AB | CR_W_AM;
1796	}
1797
1798	stop_nic_rxtx(ioaddr, np->crvalue);
1799
1800	iowrite32(mc_filter[0], ioaddr + MAR0);
1801	iowrite32(mc_filter[1], ioaddr + MAR1);
1802	np->crvalue &= ~CR_W_RXMODEMASK;
1803	np->crvalue |= rx_mode;
1804	iowrite32(np->crvalue, ioaddr + TCRRCR);
1805}
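/*
 * Illustration of the hash computed in __set_rx_mode() above: each
 * multicast address selects one bit of the 64-bit MAR0/MAR1 filter.
 *
 *	u32 crc = ether_crc(ETH_ALEN, addr);	// addr: any multicast ESA
 *	unsigned int bit = (crc >> 26) ^ 0x3F;	// 6-bit index, 0..63
 *	unsigned int word = bit >> 5;		// 0 -> MAR0, 1 -> MAR1
 *	u32 mask = 1U << (bit & 31);		// bit within that register
 *
 * The filter is approximate: addresses that hash to the same bit all
 * pass, and the stack discards any unwanted frames that slip through.
 */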
1806
1807static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1808{
1809	struct netdev_private *np = netdev_priv(dev);
1810
1811	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1812	/* the ethtool core supplies a default ->version when it is left unset */
1813	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1814}
1815
1816static int netdev_get_link_ksettings(struct net_device *dev,
1817				     struct ethtool_link_ksettings *cmd)
1818{
1819	struct netdev_private *np = netdev_priv(dev);
1820
1821	spin_lock_irq(&np->lock);
1822	mii_ethtool_get_link_ksettings(&np->mii, cmd);
1823	spin_unlock_irq(&np->lock);
1824
1825	return 0;
1826}
1827
1828static int netdev_set_link_ksettings(struct net_device *dev,
1829				     const struct ethtool_link_ksettings *cmd)
1830{
1831	struct netdev_private *np = netdev_priv(dev);
1832	int rc;
1833
1834	spin_lock_irq(&np->lock);
1835	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
1836	spin_unlock_irq(&np->lock);
1837
1838	return rc;
1839}
1840
1841static int netdev_nway_reset(struct net_device *dev)
1842{
1843	struct netdev_private *np = netdev_priv(dev);
1844	return mii_nway_restart(&np->mii);
1845}
1846
1847static u32 netdev_get_link(struct net_device *dev)
1848{
1849	struct netdev_private *np = netdev_priv(dev);
1850	return mii_link_ok(&np->mii);
1851}
1852
1853static u32 netdev_get_msglevel(struct net_device *dev)
1854{
1855	return debug;
1856}
1857
1858static void netdev_set_msglevel(struct net_device *dev, u32 value)
1859{
1860	debug = value;
1861}
1862
1863static const struct ethtool_ops netdev_ethtool_ops = {
1864	.get_drvinfo		= netdev_get_drvinfo,
1865	.nway_reset		= netdev_nway_reset,
1866	.get_link		= netdev_get_link,
1867	.get_msglevel		= netdev_get_msglevel,
1868	.set_msglevel		= netdev_set_msglevel,
1869	.get_link_ksettings	= netdev_get_link_ksettings,
1870	.set_link_ksettings	= netdev_set_link_ksettings,
1871};
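/*
 * Userspace mapping for the ops table above, via the standard ethtool
 * core dispatch: "ethtool -i ethX" -> get_drvinfo, "ethtool -r ethX" ->
 * nway_reset, the link-detected line of "ethtool ethX" -> get_link, and
 * "ethtool -s ethX speed ... duplex ..." -> set_link_ksettings.
 */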
1872
1873static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1874{
1875	struct netdev_private *np = netdev_priv(dev);
1876	int rc;
1877
1878	if (!netif_running(dev))
1879		return -EINVAL;
1880
1881	spin_lock_irq(&np->lock);
1882	rc = generic_mii_ioctl(&np->mii, if_mii(rq), cmd, NULL);
1883	spin_unlock_irq(&np->lock);
1884
1885	return rc;
1886}
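/*
 * Sketch of the userspace side of mii_ioctl() above, using the standard
 * SIOCGMIIPHY/SIOCGMIIREG flow (the interface name and the BMSR read are
 * illustrative; mii-tool uses the same pattern):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// PHY address lands in mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// BMSR value lands in mii->val_out
 */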
1887
1888
1889static int netdev_close(struct net_device *dev)
1890{
1891	struct netdev_private *np = netdev_priv(dev);
1892	void __iomem *ioaddr = np->mem;
1893	int i;
1894
1895	netif_stop_queue(dev);
1896
1897	/* Disable interrupts by clearing the interrupt mask. */
1898	iowrite32(0x0000, ioaddr + IMR);
1899
1900	/* Stop the chip's Tx and Rx processes. */
1901	stop_nic_rxtx(ioaddr, 0);
1902
1903	del_timer_sync(&np->timer);
1904	del_timer_sync(&np->reset_timer);
1905
1906	free_irq(np->pci_dev->irq, dev);
1907
1908	/* Free all the skbuffs in the Rx queue. */
1909	for (i = 0; i < RX_RING_SIZE; i++) {
1910		struct sk_buff *skb = np->rx_ring[i].skbuff;
1911
1912		np->rx_ring[i].status = 0;
1913		if (skb) {
1914			pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
1915				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1916			dev_kfree_skb(skb);
1917			np->rx_ring[i].skbuff = NULL;
1918		}
1919	}
1920
1921	for (i = 0; i < TX_RING_SIZE; i++) {
1922		struct sk_buff *skb = np->tx_ring[i].skbuff;
1923
1924		if (skb) {
1925			pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
1926				skb->len, PCI_DMA_TODEVICE);
1927			dev_kfree_skb(skb);
1928			np->tx_ring[i].skbuff = NULL;
1929		}
1930	}
1931
1932	return 0;
1933}
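/*
 * Teardown ordering in netdev_close() above matters: the queue is
 * stopped and interrupts are masked, DMA is halted, the timers are
 * deleted synchronously and the IRQ is freed, so no handler or timer
 * can still run; only then are the rings unmapped and their skbs freed.
 */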
1934
1935static const struct pci_device_id fealnx_pci_tbl[] = {
1936	{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1937	{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
1938	{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
1939	{} /* terminate list */
1940};
1941MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
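/*
 * The table above keys module autoloading: MODULE_DEVICE_TABLE(pci, ...)
 * emits modalias entries, so a device with vendor 0x1516 (Myson) and
 * device ID 0x0800, 0x0803 or 0x0891 pulls this driver in automatically.
 * The final field (0, 1, 2) is driver_data, an index the probe routine
 * can use to tell the chip variants apart.
 */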
1942
1943
1944static struct pci_driver fealnx_driver = {
1945	.name		= "fealnx",
1946	.id_table	= fealnx_pci_tbl,
1947	.probe		= fealnx_init_one,
1948	.remove		= fealnx_remove_one,
1949};
1950
1951static int __init fealnx_init(void)
1952{
1953/* when a module, this is printed whether or not devices are found in probe */
1954#ifdef MODULE
1955	printk(version);
1956#endif
1957
1958	return pci_register_driver(&fealnx_driver);
1959}
1960
1961static void __exit fealnx_exit(void)
1962{
1963	pci_unregister_driver(&fealnx_driver);
1964}
1965
1966module_init(fealnx_init);
1967module_exit(fealnx_exit);
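/*
 * Aside: if the version banner in fealnx_init() were dropped, the
 * init/exit boilerplate above could shrink to the standard helper
 *
 *	module_pci_driver(fealnx_driver);
 *
 * which expands to exactly this register/unregister pair.
 */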