   1/*
   2 * tc35815.c: A TOSHIBA TC35815CF PCI 10/100Mbps ethernet driver for linux.
   3 *
   4 * Based on skeleton.c by Donald Becker.
   5 *
   6 * This driver is a replacement for the older and less maintained version.
   7 * This is a header of the older version:
   8 *	-----<snip>-----
   9 *	Copyright 2001 MontaVista Software Inc.
  10 *	Author: MontaVista Software, Inc.
  11 *		ahennessy@mvista.com
  12 *	Copyright (C) 2000-2001 Toshiba Corporation
  13 *	static const char *version =
  14 *		"tc35815.c:v0.00 26/07/2000 by Toshiba Corporation\n";
  15 *	-----<snip>-----
  16 *
  17 * This file is subject to the terms and conditions of the GNU General Public
  18 * License.  See the file "COPYING" in the main directory of this archive
  19 * for more details.
  20 *
  21 * (C) Copyright TOSHIBA CORPORATION 2004-2005
  22 * All Rights Reserved.
  23 */
  24
  25#define DRV_VERSION	"1.39"
  26static const char *version = "tc35815.c:v" DRV_VERSION "\n";
  27#define MODNAME			"tc35815"
  28
  29#include <linux/module.h>
  30#include <linux/kernel.h>
  31#include <linux/types.h>
  32#include <linux/fcntl.h>
  33#include <linux/interrupt.h>
  34#include <linux/ioport.h>
  35#include <linux/in.h>
  36#include <linux/if_vlan.h>
  37#include <linux/slab.h>
  38#include <linux/string.h>
  39#include <linux/spinlock.h>
  40#include <linux/errno.h>
  41#include <linux/netdevice.h>
  42#include <linux/etherdevice.h>
  43#include <linux/skbuff.h>
  44#include <linux/delay.h>
  45#include <linux/pci.h>
  46#include <linux/phy.h>
  47#include <linux/workqueue.h>
  48#include <linux/platform_device.h>
  49#include <linux/prefetch.h>
  50#include <asm/io.h>
  51#include <asm/byteorder.h>
  52
  53enum tc35815_chiptype {
  54	TC35815CF = 0,
  55	TC35815_NWU,
  56	TC35815_TX4939,
  57};
  58
  59/* indexed by tc35815_chiptype, above */
  60static const struct {
  61	const char *name;
  62} chip_info[] = {
  63	{ "TOSHIBA TC35815CF 10/100BaseTX" },
  64	{ "TOSHIBA TC35815 with Wake on LAN" },
  65	{ "TOSHIBA TC35815/TX4939" },
  66};
  67
  68static const struct pci_device_id tc35815_pci_tbl[] = {
  69	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
  70	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
  71	{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
  72	{0,}
  73};
  74MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
  75
  76/* see MODULE_PARM_DESC */
  77static struct tc35815_options {
  78	int speed;
  79	int duplex;
  80} options;
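/*
 * These options are filled from module parameters elsewhere in the
 * driver (not shown in this excerpt).  A minimal sketch of the usual
 * wiring, assuming the standard module_param helpers:
 *
 *	module_param_named(speed, options.speed, int, 0);
 *	MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
 *	module_param_named(duplex, options.duplex, int, 0);
 *	MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
 */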
  81
  82/*
  83 * Registers
  84 */
  85struct tc35815_regs {
  86	__u32 DMA_Ctl;		/* 0x00 */
  87	__u32 TxFrmPtr;
  88	__u32 TxThrsh;
  89	__u32 TxPollCtr;
  90	__u32 BLFrmPtr;
  91	__u32 RxFragSize;
  92	__u32 Int_En;
  93	__u32 FDA_Bas;
  94	__u32 FDA_Lim;		/* 0x20 */
  95	__u32 Int_Src;
  96	__u32 unused0[2];
  97	__u32 PauseCnt;
  98	__u32 RemPauCnt;
  99	__u32 TxCtlFrmStat;
 100	__u32 unused1;
 101	__u32 MAC_Ctl;		/* 0x40 */
 102	__u32 CAM_Ctl;
 103	__u32 Tx_Ctl;
 104	__u32 Tx_Stat;
 105	__u32 Rx_Ctl;
 106	__u32 Rx_Stat;
 107	__u32 MD_Data;
 108	__u32 MD_CA;
 109	__u32 CAM_Adr;		/* 0x60 */
 110	__u32 CAM_Data;
 111	__u32 CAM_Ena;
 112	__u32 PROM_Ctl;
 113	__u32 PROM_Data;
 114	__u32 Algn_Cnt;
 115	__u32 CRC_Cnt;
 116	__u32 Miss_Cnt;
 117};
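/*
 * This struct is never instantiated; it is overlaid on the chip's MMIO
 * window, as done throughout the driver, e.g.:
 *
 *	struct tc35815_regs __iomem *tr =
 *		(struct tc35815_regs __iomem *)dev->base_addr;
 *	u32 ctl = tc_readl(&tr->MAC_Ctl);	// tc_readl defined below
 */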
 118
 119/*
 120 * Bit assignments
 121 */
 122/* DMA_Ctl bit assign ------------------------------------------------------- */
 123#define DMA_RxAlign	       0x00c00000 /* 1:Reception Alignment	     */
 124#define DMA_RxAlign_1	       0x00400000
 125#define DMA_RxAlign_2	       0x00800000
 126#define DMA_RxAlign_3	       0x00c00000
 127#define DMA_M66EnStat	       0x00080000 /* 1:66MHz Enable State	     */
 128#define DMA_IntMask	       0x00040000 /* 1:Interrupt mask		     */
 129#define DMA_SWIntReq	       0x00020000 /* 1:Software Interrupt request    */
 130#define DMA_TxWakeUp	       0x00010000 /* 1:Transmit Wake Up		     */
 131#define DMA_RxBigE	       0x00008000 /* 1:Receive Big Endian	     */
 132#define DMA_TxBigE	       0x00004000 /* 1:Transmit Big Endian	     */
 133#define DMA_TestMode	       0x00002000 /* 1:Test Mode		     */
 134#define DMA_PowrMgmnt	       0x00001000 /* 1:Power Management		     */
 135#define DMA_DmBurst_Mask       0x000001fc /* DMA Burst size		     */
 136
 137/* RxFragSize bit assign ---------------------------------------------------- */
 138#define RxFrag_EnPack	       0x00008000 /* 1:Enable Packing		     */
 139#define RxFrag_MinFragMask     0x00000ffc /* Minimum Fragment		     */
 140
 141/* MAC_Ctl bit assign ------------------------------------------------------- */
 142#define MAC_Link10	       0x00008000 /* 1:Link Status 10Mbits	     */
 143#define MAC_EnMissRoll	       0x00002000 /* 1:Enable Missed Roll	     */
 144#define MAC_MissRoll	       0x00000400 /* 1:Missed Roll		     */
 145#define MAC_Loop10	       0x00000080 /* 1:Loop 10 Mbps		     */
 146#define MAC_Conn_Auto	       0x00000000 /*00:Connection mode (Automatic)   */
 147#define MAC_Conn_10M	       0x00000020 /*01:		       (10Mbps endec)*/
  148#define MAC_Conn_Mll	       0x00000040 /*10:		       (MII clock)   */
 149#define MAC_MacLoop	       0x00000010 /* 1:MAC Loopback		     */
 150#define MAC_FullDup	       0x00000008 /* 1:Full Duplex 0:Half Duplex     */
 151#define MAC_Reset	       0x00000004 /* 1:Software Reset		     */
 152#define MAC_HaltImm	       0x00000002 /* 1:Halt Immediate		     */
 153#define MAC_HaltReq	       0x00000001 /* 1:Halt request		     */
 154
 155/* PROM_Ctl bit assign ------------------------------------------------------ */
 156#define PROM_Busy	       0x00008000 /* 1:Busy (Start Operation)	     */
 157#define PROM_Read	       0x00004000 /*10:Read operation		     */
 158#define PROM_Write	       0x00002000 /*01:Write operation		     */
 159#define PROM_Erase	       0x00006000 /*11:Erase operation		     */
  160					  /*00:Enable or Disable Writing,    */
 161					  /*	  as specified in PROM_Addr. */
 162#define PROM_Addr_Ena	       0x00000030 /*11xxxx:PROM Write enable	     */
 163					  /*00xxxx:	      disable	     */
 164
 165/* CAM_Ctl bit assign ------------------------------------------------------- */
 166#define CAM_CompEn	       0x00000010 /* 1:CAM Compare Enable	     */
 167#define CAM_NegCAM	       0x00000008 /* 1:Reject packets CAM recognizes,*/
 168					  /*			accept other */
  169#define CAM_BroadAcc	       0x00000004 /* 1:Broadcast accept		     */
  170#define CAM_GroupAcc	       0x00000002 /* 1:Multicast accept		     */
 171#define CAM_StationAcc	       0x00000001 /* 1:unicast accept		     */
 172
 173/* CAM_Ena bit assign ------------------------------------------------------- */
 174#define CAM_ENTRY_MAX		       21   /* CAM Data entry max count	     */
 175#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits)  */
 176#define CAM_Ena_Bit(index)	(1 << (index))
 177#define CAM_ENTRY_DESTINATION	0
 178#define CAM_ENTRY_SOURCE	1
 179#define CAM_ENTRY_MACCTL	20
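/*
 * Illustrative only (the CAM setup code is outside this excerpt):
 * enabling the destination and source address entries would look like
 *
 *	tc_writel(CAM_Ena_Bit(CAM_ENTRY_DESTINATION) |
 *		  CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
 */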
 180
 181/* Tx_Ctl bit assign -------------------------------------------------------- */
 182#define Tx_En		       0x00000001 /* 1:Transmit enable		     */
 183#define Tx_TxHalt	       0x00000002 /* 1:Transmit Halt Request	     */
 184#define Tx_NoPad	       0x00000004 /* 1:Suppress Padding		     */
  185#define Tx_NoCRC	       0x00000008 /* 1:Suppress CRC		     */
 186#define Tx_FBack	       0x00000010 /* 1:Fast Back-off		     */
 187#define Tx_EnUnder	       0x00000100 /* 1:Enable Underrun		     */
 188#define Tx_EnExDefer	       0x00000200 /* 1:Enable Excessive Deferral     */
 189#define Tx_EnLCarr	       0x00000400 /* 1:Enable Lost Carrier	     */
 190#define Tx_EnExColl	       0x00000800 /* 1:Enable Excessive Collision    */
 191#define Tx_EnLateColl	       0x00001000 /* 1:Enable Late Collision	     */
 192#define Tx_EnTxPar	       0x00002000 /* 1:Enable Transmit Parity	     */
 193#define Tx_EnComp	       0x00004000 /* 1:Enable Completion	     */
 194
 195/* Tx_Stat bit assign ------------------------------------------------------- */
 196#define Tx_TxColl_MASK	       0x0000000F /* Tx Collision Count		     */
 197#define Tx_ExColl	       0x00000010 /* Excessive Collision	     */
  198#define Tx_TXDefer	       0x00000020 /* Transmit Deferred		     */
 199#define Tx_Paused	       0x00000040 /* Transmit Paused		     */
 200#define Tx_IntTx	       0x00000080 /* Interrupt on Tx		     */
 201#define Tx_Under	       0x00000100 /* Underrun			     */
 202#define Tx_Defer	       0x00000200 /* Deferral			     */
 203#define Tx_NCarr	       0x00000400 /* No Carrier			     */
 204#define Tx_10Stat	       0x00000800 /* 10Mbps Status		     */
 205#define Tx_LateColl	       0x00001000 /* Late Collision		     */
 206#define Tx_TxPar	       0x00002000 /* Tx Parity Error		     */
 207#define Tx_Comp		       0x00004000 /* Completion			     */
 208#define Tx_Halted	       0x00008000 /* Tx Halted			     */
 209#define Tx_SQErr	       0x00010000 /* Signal Quality Error(SQE)	     */
 210
 211/* Rx_Ctl bit assign -------------------------------------------------------- */
 212#define Rx_EnGood	       0x00004000 /* 1:Enable Good		     */
 213#define Rx_EnRxPar	       0x00002000 /* 1:Enable Receive Parity	     */
 214#define Rx_EnLongErr	       0x00000800 /* 1:Enable Long Error	     */
 215#define Rx_EnOver	       0x00000400 /* 1:Enable OverFlow		     */
 216#define Rx_EnCRCErr	       0x00000200 /* 1:Enable CRC Error		     */
 217#define Rx_EnAlign	       0x00000100 /* 1:Enable Alignment		     */
 218#define Rx_IgnoreCRC	       0x00000040 /* 1:Ignore CRC Value		     */
 219#define Rx_StripCRC	       0x00000010 /* 1:Strip CRC Value		     */
 220#define Rx_ShortEn	       0x00000008 /* 1:Short Enable		     */
 221#define Rx_LongEn	       0x00000004 /* 1:Long Enable		     */
 222#define Rx_RxHalt	       0x00000002 /* 1:Receive Halt Request	     */
  223#define Rx_RxEn		       0x00000001 /* 1:Receive Interrupt Enable	     */
 224
 225/* Rx_Stat bit assign ------------------------------------------------------- */
 226#define Rx_Halted	       0x00008000 /* Rx Halted			     */
 227#define Rx_Good		       0x00004000 /* Rx Good			     */
 228#define Rx_RxPar	       0x00002000 /* Rx Parity Error		     */
 229#define Rx_TypePkt	       0x00001000 /* Rx Type Packet		     */
 230#define Rx_LongErr	       0x00000800 /* Rx Long Error		     */
 231#define Rx_Over		       0x00000400 /* Rx Overflow		     */
 232#define Rx_CRCErr	       0x00000200 /* Rx CRC Error		     */
 233#define Rx_Align	       0x00000100 /* Rx Alignment Error		     */
 234#define Rx_10Stat	       0x00000080 /* Rx 10Mbps Status		     */
 235#define Rx_IntRx	       0x00000040 /* Rx Interrupt		     */
 236#define Rx_CtlRecd	       0x00000020 /* Rx Control Receive		     */
 237#define Rx_InLenErr	       0x00000010 /* Rx In Range Frame Length Error  */
 238
 239#define Rx_Stat_Mask	       0x0000FFF0 /* Rx All Status Mask		     */
 240
 241/* Int_En bit assign -------------------------------------------------------- */
 242#define Int_NRAbtEn	       0x00000800 /* 1:Non-recoverable Abort Enable  */
 243#define Int_TxCtlCmpEn	       0x00000400 /* 1:Transmit Ctl Complete Enable  */
 244#define Int_DmParErrEn	       0x00000200 /* 1:DMA Parity Error Enable	     */
 245#define Int_DParDEn	       0x00000100 /* 1:Data Parity Error Enable	     */
 246#define Int_EarNotEn	       0x00000080 /* 1:Early Notify Enable	     */
 247#define Int_DParErrEn	       0x00000040 /* 1:Detected Parity Error Enable  */
 248#define Int_SSysErrEn	       0x00000020 /* 1:Signalled System Error Enable */
 249#define Int_RMasAbtEn	       0x00000010 /* 1:Received Master Abort Enable  */
 250#define Int_RTargAbtEn	       0x00000008 /* 1:Received Target Abort Enable  */
 251#define Int_STargAbtEn	       0x00000004 /* 1:Signalled Target Abort Enable */
 252#define Int_BLExEn	       0x00000002 /* 1:Buffer List Exhausted Enable  */
 253#define Int_FDAExEn	       0x00000001 /* 1:Free Descriptor Area	     */
 254					  /*		   Exhausted Enable  */
 255
 256/* Int_Src bit assign ------------------------------------------------------- */
 257#define Int_NRabt	       0x00004000 /* 1:Non Recoverable error	     */
 258#define Int_DmParErrStat       0x00002000 /* 1:DMA Parity Error & Clear	     */
 259#define Int_BLEx	       0x00001000 /* 1:Buffer List Empty & Clear     */
 260#define Int_FDAEx	       0x00000800 /* 1:FDA Empty & Clear	     */
 261#define Int_IntNRAbt	       0x00000400 /* 1:Non Recoverable Abort	     */
 262#define Int_IntCmp	       0x00000200 /* 1:MAC control packet complete   */
 263#define Int_IntExBD	       0x00000100 /* 1:Interrupt Extra BD & Clear    */
 264#define Int_DmParErr	       0x00000080 /* 1:DMA Parity Error & Clear	     */
 265#define Int_IntEarNot	       0x00000040 /* 1:Receive Data write & Clear    */
 266#define Int_SWInt	       0x00000020 /* 1:Software request & Clear	     */
 267#define Int_IntBLEx	       0x00000010 /* 1:Buffer List Empty & Clear     */
 268#define Int_IntFDAEx	       0x00000008 /* 1:FDA Empty & Clear	     */
 269#define Int_IntPCI	       0x00000004 /* 1:PCI controller & Clear	     */
 270#define Int_IntMacRx	       0x00000002 /* 1:Rx controller & Clear	     */
 271#define Int_IntMacTx	       0x00000001 /* 1:Tx controller & Clear	     */
 272
 273/* MD_CA bit assign --------------------------------------------------------- */
 274#define MD_CA_PreSup	       0x00001000 /* 1:Preamble Suppress		     */
 275#define MD_CA_Busy	       0x00000800 /* 1:Busy (Start Operation)	     */
 276#define MD_CA_Wr	       0x00000400 /* 1:Write 0:Read		     */
 277
 278
 279/*
 280 * Descriptors
 281 */
 282
  283/* Frame descriptor */
 284struct FDesc {
 285	volatile __u32 FDNext;
 286	volatile __u32 FDSystem;
 287	volatile __u32 FDStat;
 288	volatile __u32 FDCtl;
 289};
 290
  291/* Buffer descriptor */
 292struct BDesc {
 293	volatile __u32 BuffData;
 294	volatile __u32 BDCtl;
 295};
 296
 297#define FD_ALIGN	16
 298
  299/* Frame Descriptor bit assign ---------------------------------------------- */
 300#define FD_FDLength_MASK       0x0000FFFF /* Length MASK		     */
 301#define FD_BDCnt_MASK	       0x001F0000 /* BD count MASK in FD	     */
 302#define FD_FrmOpt_MASK	       0x7C000000 /* Frame option MASK		     */
 303#define FD_FrmOpt_BigEndian    0x40000000 /* Tx/Rx */
 304#define FD_FrmOpt_IntTx	       0x20000000 /* Tx only */
 305#define FD_FrmOpt_NoCRC	       0x10000000 /* Tx only */
 306#define FD_FrmOpt_NoPadding    0x08000000 /* Tx only */
 307#define FD_FrmOpt_Packing      0x04000000 /* Rx only */
 308#define FD_CownsFD	       0x80000000 /* FD Controller owner bit	     */
 309#define FD_Next_EOL	       0x00000001 /* FD EOL indicator		     */
 310#define FD_BDCnt_SHIFT	       16
 311
  312/* Buffer Descriptor bit assign --------------------------------------------- */
 313#define BD_BuffLength_MASK     0x0000FFFF /* Receive Data Size		     */
 314#define BD_RxBDID_MASK	       0x00FF0000 /* BD ID Number MASK		     */
 315#define BD_RxBDSeqN_MASK       0x7F000000 /* Rx BD Sequence Number	     */
 316#define BD_CownsBD	       0x80000000 /* BD Controller owner bit	     */
 317#define BD_RxBDID_SHIFT	       16
 318#define BD_RxBDSeqN_SHIFT      24
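/*
 * Fields are recovered with the usual mask-and-shift pattern; e.g.
 * tc35815_rx() below extracts the buffer index of a received BD with
 *
 *	id = (le32_to_cpu(bd->BDCtl) & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
 */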
 319
 320
 321/* Some useful constants. */
 322
 323#define TX_CTL_CMD	(Tx_EnTxPar | Tx_EnLateColl | \
 324	Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
 325	Tx_En)	/* maybe  0x7b01 */
 326/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
 327#define RX_CTL_CMD	(Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
 328	| Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
 329#define INT_EN_CMD  (Int_NRAbtEn | \
 330	Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
 331	Int_SSysErrEn  | Int_RMasAbtEn | Int_RTargAbtEn | \
 332	Int_STargAbtEn | \
 333	Int_BLExEn  | Int_FDAExEn) /* maybe 0xb7f*/
 334#define DMA_CTL_CMD	DMA_BURST_SIZE
 335#define HAVE_DMA_RXALIGN(lp)	likely((lp)->chiptype != TC35815CF)
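/*
 * The original TC35815CF cannot align received frames in hardware;
 * tc35815_rx() below compensates with a memmove() of NET_IP_ALIGN
 * bytes.  The later variants are assumed to use the DMA_RxAlign bits
 * of DMA_Ctl instead.
 */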
 336
 337/* Tuning parameters */
 338#define DMA_BURST_SIZE	32
 339#define TX_THRESHOLD	1024
  340/* Threshold (max packet size) used when PCI transfer bandwidth is low. */
 341#define TX_THRESHOLD_MAX 1536
  342/* Switch to the max threshold after this many Tx underrun errors. */
 343#define TX_THRESHOLD_KEEP_LIMIT 10
 344
 345/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
 346#define FD_PAGE_NUM 4
 347#define RX_BUF_NUM	128	/* < 256 */
 348#define RX_FD_NUM	256	/* >= 32 */
 349#define TX_FD_NUM	128
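/*
 * Worked check of the constraint above, assuming 4 KiB pages:
 * 16 + 128*8 + 256*16 + 128*32 = 9232 <= 4096*4 = 16384,
 * so the default ring sizes fit in FD_PAGE_NUM pages.
 */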
 350#if RX_CTL_CMD & Rx_LongEn
 351#define RX_BUF_SIZE	PAGE_SIZE
 352#elif RX_CTL_CMD & Rx_StripCRC
 353#define RX_BUF_SIZE	\
 354	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
 355#else
 356#define RX_BUF_SIZE	\
 357	L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
 358#endif
 359#define RX_FD_RESERVE	(2 / 2)	/* max 2 BD per RxFD */
 360#define NAPI_WEIGHT	16
 361
 362struct TxFD {
 363	struct FDesc fd;
 364	struct BDesc bd;
 365	struct BDesc unused;
 366};
 367
 368struct RxFD {
 369	struct FDesc fd;
  370	struct BDesc bd[];	/* variable length */
 371};
 372
 373struct FrFD {
 374	struct FDesc fd;
 375	struct BDesc bd[RX_BUF_NUM];
 376};
 377
 378
 379#define tc_readl(addr)	ioread32(addr)
 380#define tc_writel(d, addr)	iowrite32(d, addr)
 381
 382#define TC35815_TX_TIMEOUT  msecs_to_jiffies(400)
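/*
 * Assigned to dev->watchdog_timeo in tc35815_init_one(); the stack
 * invokes .ndo_tx_timeout (tc35815_tx_timeout) if the Tx queue stalls
 * for longer than this.
 */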
 383
  384/* Information that needs to be kept for each controller. */
 385struct tc35815_local {
 386	struct pci_dev *pci_dev;
 387
 388	struct net_device *dev;
 389	struct napi_struct napi;
 390
 391	/* statistics */
 392	struct {
 393		int max_tx_qlen;
 394		int tx_ints;
 395		int rx_ints;
 396		int tx_underrun;
 397	} lstats;
 398
 399	/* Tx control lock.  This protects the transmit buffer ring
 400	 * state along with the "tx full" state of the driver.  This
 401	 * means all netif_queue flow control actions are protected
 402	 * by this lock as well.
 403	 */
 404	spinlock_t lock;
 405	spinlock_t rx_lock;
 406
 407	struct mii_bus *mii_bus;
 408	struct phy_device *phy_dev;
 409	int duplex;
 410	int speed;
 411	int link;
 412	struct work_struct restart_work;
 413
 414	/*
 415	 * Transmitting: Batch Mode.
 416	 *	1 BD in 1 TxFD.
 417	 * Receiving: Non-Packing Mode.
 418	 *	1 circular FD for Free Buffer List.
 419	 *	RX_BUF_NUM BD in Free Buffer FD.
 420	 *	One Free Buffer BD has ETH_FRAME_LEN data buffer.
 421	 */
 422	void *fd_buf;	/* for TxFD, RxFD, FrFD */
 423	dma_addr_t fd_buf_dma;
 424	struct TxFD *tfd_base;
 425	unsigned int tfd_start;
 426	unsigned int tfd_end;
 427	struct RxFD *rfd_base;
 428	struct RxFD *rfd_limit;
 429	struct RxFD *rfd_cur;
 430	struct FrFD *fbl_ptr;
 431	unsigned int fbl_count;
 432	struct {
 433		struct sk_buff *skb;
 434		dma_addr_t skb_dma;
 435	} tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
 436	u32 msg_enable;
 437	enum tc35815_chiptype chiptype;
 438};
 439
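/*
 * All descriptors live in the single fd_buf DMA allocation; convert a
 * CPU pointer within it to the bus address the controller expects.
 */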
 440static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
 441{
 442	return lp->fd_buf_dma + ((u8 *)virt - (u8 *)lp->fd_buf);
 443}
 444#ifdef DEBUG
 445static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
 446{
 447	return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
 448}
 449#endif
 450static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
 451				       struct pci_dev *hwdev,
 452				       dma_addr_t *dma_handle)
 453{
 454	struct sk_buff *skb;
 455	skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
 456	if (!skb)
 457		return NULL;
 458	*dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
 459				     PCI_DMA_FROMDEVICE);
 460	if (pci_dma_mapping_error(hwdev, *dma_handle)) {
 461		dev_kfree_skb_any(skb);
 462		return NULL;
 463	}
  464	skb_reserve(skb, 2);	/* make IP header 4-byte aligned */
 465	return skb;
 466}
 467
 468static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
 469{
 470	pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
 471			 PCI_DMA_FROMDEVICE);
 472	dev_kfree_skb_any(skb);
 473}
 474
 475/* Index to functions, as function prototypes. */
 476
 477static int	tc35815_open(struct net_device *dev);
 478static int	tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
 479static irqreturn_t	tc35815_interrupt(int irq, void *dev_id);
 480static int	tc35815_rx(struct net_device *dev, int limit);
 481static int	tc35815_poll(struct napi_struct *napi, int budget);
 482static void	tc35815_txdone(struct net_device *dev);
 483static int	tc35815_close(struct net_device *dev);
 484static struct	net_device_stats *tc35815_get_stats(struct net_device *dev);
 485static void	tc35815_set_multicast_list(struct net_device *dev);
 486static void	tc35815_tx_timeout(struct net_device *dev);
 487static int	tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 488#ifdef CONFIG_NET_POLL_CONTROLLER
 489static void	tc35815_poll_controller(struct net_device *dev);
 490#endif
 491static const struct ethtool_ops tc35815_ethtool_ops;
 492
  493/* Hardware initialization helpers (defined later in this file). */
 494static void	tc35815_chip_reset(struct net_device *dev);
 495static void	tc35815_chip_init(struct net_device *dev);
 496
 497#ifdef DEBUG
 498static void	panic_queues(struct net_device *dev);
 499#endif
 500
 501static void tc35815_restart_work(struct work_struct *work);
 502
 503static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 504{
 505	struct net_device *dev = bus->priv;
 506	struct tc35815_regs __iomem *tr =
 507		(struct tc35815_regs __iomem *)dev->base_addr;
 508	unsigned long timeout = jiffies + HZ;
 509
 510	tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
 511	udelay(12); /* it takes 32 x 400ns at least */
 512	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
 513		if (time_after(jiffies, timeout))
 514			return -EIO;
 515		cpu_relax();
 516	}
 517	return tc_readl(&tr->MD_Data) & 0xffff;
 518}
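/*
 * phylib drives these accessors through the mii_bus registered in
 * tc_mii_init(); e.g. the phy_read(phydev, MII_BMSR) call below ends
 * up here with regnum == MII_BMSR.
 */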
 519
 520static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
 521{
 522	struct net_device *dev = bus->priv;
 523	struct tc35815_regs __iomem *tr =
 524		(struct tc35815_regs __iomem *)dev->base_addr;
 525	unsigned long timeout = jiffies + HZ;
 526
 527	tc_writel(val, &tr->MD_Data);
 528	tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
 529		  &tr->MD_CA);
 530	udelay(12); /* it takes 32 x 400ns at least */
 531	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
 532		if (time_after(jiffies, timeout))
 533			return -EIO;
 534		cpu_relax();
 535	}
 536	return 0;
 537}
 538
 539static void tc_handle_link_change(struct net_device *dev)
 540{
 541	struct tc35815_local *lp = netdev_priv(dev);
 542	struct phy_device *phydev = lp->phy_dev;
 543	unsigned long flags;
 544	int status_change = 0;
 545
 546	spin_lock_irqsave(&lp->lock, flags);
 547	if (phydev->link &&
 548	    (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
 549		struct tc35815_regs __iomem *tr =
 550			(struct tc35815_regs __iomem *)dev->base_addr;
 551		u32 reg;
 552
 553		reg = tc_readl(&tr->MAC_Ctl);
 554		reg |= MAC_HaltReq;
 555		tc_writel(reg, &tr->MAC_Ctl);
 556		if (phydev->duplex == DUPLEX_FULL)
 557			reg |= MAC_FullDup;
 558		else
 559			reg &= ~MAC_FullDup;
 560		tc_writel(reg, &tr->MAC_Ctl);
 561		reg &= ~MAC_HaltReq;
 562		tc_writel(reg, &tr->MAC_Ctl);
 563
 564		/*
 565		 * TX4939 PCFG.SPEEDn bit will be changed on
 566		 * NETDEV_CHANGE event.
 567		 */
 568		/*
 569		 * WORKAROUND: enable LostCrS only if half duplex
 570		 * operation.
 571		 * (TX4939 does not have EnLCarr)
 572		 */
 573		if (phydev->duplex == DUPLEX_HALF &&
 574		    lp->chiptype != TC35815_TX4939)
 575			tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
 576				  &tr->Tx_Ctl);
 577
 578		lp->speed = phydev->speed;
 579		lp->duplex = phydev->duplex;
 580		status_change = 1;
 581	}
 582
 583	if (phydev->link != lp->link) {
 584		if (phydev->link) {
 585			/* delayed promiscuous enabling */
 586			if (dev->flags & IFF_PROMISC)
 587				tc35815_set_multicast_list(dev);
 588		} else {
 589			lp->speed = 0;
 590			lp->duplex = -1;
 591		}
 592		lp->link = phydev->link;
 593
 594		status_change = 1;
 595	}
 596	spin_unlock_irqrestore(&lp->lock, flags);
 597
 598	if (status_change && netif_msg_link(lp)) {
 599		phy_print_status(phydev);
 600		pr_debug("%s: MII BMCR %04x BMSR %04x LPA %04x\n",
 601			 dev->name,
 602			 phy_read(phydev, MII_BMCR),
 603			 phy_read(phydev, MII_BMSR),
 604			 phy_read(phydev, MII_LPA));
 605	}
 606}
 607
 608static int tc_mii_probe(struct net_device *dev)
 609{
 610	struct tc35815_local *lp = netdev_priv(dev);
 611	struct phy_device *phydev;
 612	u32 dropmask;
 613
 614	phydev = phy_find_first(lp->mii_bus);
 615	if (!phydev) {
 616		printk(KERN_ERR "%s: no PHY found\n", dev->name);
 617		return -ENODEV;
 618	}
 619
 620	/* attach the mac to the phy */
 621	phydev = phy_connect(dev, phydev_name(phydev),
 622			     &tc_handle_link_change,
 623			     lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
 624	if (IS_ERR(phydev)) {
 625		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
 626		return PTR_ERR(phydev);
 627	}
 628
 629	phy_attached_info(phydev);
 630
 631	/* mask with MAC supported features */
 632	phydev->supported &= PHY_BASIC_FEATURES;
 633	dropmask = 0;
 634	if (options.speed == 10)
 635		dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
 636	else if (options.speed == 100)
 637		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
 638	if (options.duplex == 1)
 639		dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
 640	else if (options.duplex == 2)
 641		dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
 642	phydev->supported &= ~dropmask;
 643	phydev->advertising = phydev->supported;
 644
 645	lp->link = 0;
 646	lp->speed = 0;
 647	lp->duplex = -1;
 648	lp->phy_dev = phydev;
 649
 650	return 0;
 651}
 652
 653static int tc_mii_init(struct net_device *dev)
 654{
 655	struct tc35815_local *lp = netdev_priv(dev);
 656	int err;
 657
 658	lp->mii_bus = mdiobus_alloc();
 659	if (lp->mii_bus == NULL) {
 660		err = -ENOMEM;
 661		goto err_out;
 662	}
 663
 664	lp->mii_bus->name = "tc35815_mii_bus";
 665	lp->mii_bus->read = tc_mdio_read;
 666	lp->mii_bus->write = tc_mdio_write;
 667	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x",
 668		 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
 669	lp->mii_bus->priv = dev;
 670	lp->mii_bus->parent = &lp->pci_dev->dev;
 671	err = mdiobus_register(lp->mii_bus);
 672	if (err)
 673		goto err_out_free_mii_bus;
 674	err = tc_mii_probe(dev);
 675	if (err)
 676		goto err_out_unregister_bus;
 677	return 0;
 678
 679err_out_unregister_bus:
 680	mdiobus_unregister(lp->mii_bus);
 681err_out_free_mii_bus:
 682	mdiobus_free(lp->mii_bus);
 683err_out:
 684	return err;
 685}
 686
 687#ifdef CONFIG_CPU_TX49XX
 688/*
 689 * Find a platform_device providing a MAC address.  The platform code
 690 * should provide a "tc35815-mac" device with a MAC address in its
 691 * platform_data.
 692 */
 693static int tc35815_mac_match(struct device *dev, void *data)
 694{
 695	struct platform_device *plat_dev = to_platform_device(dev);
 696	struct pci_dev *pci_dev = data;
 697	unsigned int id = pci_dev->irq;
 698	return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
 699}
 700
 701static int tc35815_read_plat_dev_addr(struct net_device *dev)
 702{
 703	struct tc35815_local *lp = netdev_priv(dev);
 704	struct device *pd = bus_find_device(&platform_bus_type, NULL,
 705					    lp->pci_dev, tc35815_mac_match);
 706	if (pd) {
 707		if (pd->platform_data)
 708			memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
 709		put_device(pd);
 710		return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
 711	}
 712	return -ENODEV;
 713}
 714#else
 715static int tc35815_read_plat_dev_addr(struct net_device *dev)
 716{
 717	return -ENODEV;
 718}
 719#endif
 720
 721static int tc35815_init_dev_addr(struct net_device *dev)
 722{
 723	struct tc35815_regs __iomem *tr =
 724		(struct tc35815_regs __iomem *)dev->base_addr;
 725	int i;
 726
 727	while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
 728		;
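	/*
	 * The MAC address occupies EEPROM words 2..4; each 16-bit word
	 * read below supplies two address bytes, low byte first.
	 */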
 729	for (i = 0; i < 6; i += 2) {
 730		unsigned short data;
 731		tc_writel(PROM_Busy | PROM_Read | (i / 2 + 2), &tr->PROM_Ctl);
 732		while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
 733			;
 734		data = tc_readl(&tr->PROM_Data);
 735		dev->dev_addr[i] = data & 0xff;
 736		dev->dev_addr[i+1] = data >> 8;
 737	}
 738	if (!is_valid_ether_addr(dev->dev_addr))
 739		return tc35815_read_plat_dev_addr(dev);
 740	return 0;
 741}
 742
 743static const struct net_device_ops tc35815_netdev_ops = {
 744	.ndo_open		= tc35815_open,
 745	.ndo_stop		= tc35815_close,
 746	.ndo_start_xmit		= tc35815_send_packet,
 747	.ndo_get_stats		= tc35815_get_stats,
 748	.ndo_set_rx_mode	= tc35815_set_multicast_list,
 749	.ndo_tx_timeout		= tc35815_tx_timeout,
 750	.ndo_do_ioctl		= tc35815_ioctl,
 751	.ndo_validate_addr	= eth_validate_addr,
 752	.ndo_change_mtu		= eth_change_mtu,
 753	.ndo_set_mac_address	= eth_mac_addr,
 754#ifdef CONFIG_NET_POLL_CONTROLLER
 755	.ndo_poll_controller	= tc35815_poll_controller,
 756#endif
 757};
 758
 759static int tc35815_init_one(struct pci_dev *pdev,
 760			    const struct pci_device_id *ent)
 761{
 762	void __iomem *ioaddr = NULL;
 763	struct net_device *dev;
 764	struct tc35815_local *lp;
 765	int rc;
 766
 767	static int printed_version;
 768	if (!printed_version++) {
 769		printk(version);
 770		dev_printk(KERN_DEBUG, &pdev->dev,
 771			   "speed:%d duplex:%d\n",
 772			   options.speed, options.duplex);
 773	}
 774
 775	if (!pdev->irq) {
 776		dev_warn(&pdev->dev, "no IRQ assigned.\n");
 777		return -ENODEV;
 778	}
 779
 780	/* dev zeroed in alloc_etherdev */
 781	dev = alloc_etherdev(sizeof(*lp));
 782	if (dev == NULL)
 783		return -ENOMEM;
 784
 785	SET_NETDEV_DEV(dev, &pdev->dev);
 786	lp = netdev_priv(dev);
 787	lp->dev = dev;
 788
 789	/* enable device (incl. PCI PM wakeup), and bus-mastering */
 790	rc = pcim_enable_device(pdev);
 791	if (rc)
 792		goto err_out;
 793	rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
 794	if (rc)
 795		goto err_out;
 796	pci_set_master(pdev);
 797	ioaddr = pcim_iomap_table(pdev)[1];
 798
 799	/* Initialize the device structure. */
 800	dev->netdev_ops = &tc35815_netdev_ops;
 801	dev->ethtool_ops = &tc35815_ethtool_ops;
 802	dev->watchdog_timeo = TC35815_TX_TIMEOUT;
 803	netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
 804
 805	dev->irq = pdev->irq;
 806	dev->base_addr = (unsigned long)ioaddr;
 807
 808	INIT_WORK(&lp->restart_work, tc35815_restart_work);
 809	spin_lock_init(&lp->lock);
 810	spin_lock_init(&lp->rx_lock);
 811	lp->pci_dev = pdev;
 812	lp->chiptype = ent->driver_data;
 813
 814	lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
 815	pci_set_drvdata(pdev, dev);
 816
 817	/* Soft reset the chip. */
 818	tc35815_chip_reset(dev);
 819
 820	/* Retrieve the ethernet address. */
 821	if (tc35815_init_dev_addr(dev)) {
 822		dev_warn(&pdev->dev, "not valid ether addr\n");
 823		eth_hw_addr_random(dev);
 824	}
 825
 826	rc = register_netdev(dev);
 827	if (rc)
 828		goto err_out;
 829
 830	printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
 831		dev->name,
 832		chip_info[ent->driver_data].name,
 833		dev->base_addr,
 834		dev->dev_addr,
 835		dev->irq);
 836
 837	rc = tc_mii_init(dev);
 838	if (rc)
 839		goto err_out_unregister;
 840
 841	return 0;
 842
 843err_out_unregister:
 844	unregister_netdev(dev);
 845err_out:
 846	free_netdev(dev);
 847	return rc;
 848}
 849
 850
 851static void tc35815_remove_one(struct pci_dev *pdev)
 852{
 853	struct net_device *dev = pci_get_drvdata(pdev);
 854	struct tc35815_local *lp = netdev_priv(dev);
 855
 856	phy_disconnect(lp->phy_dev);
 857	mdiobus_unregister(lp->mii_bus);
 858	mdiobus_free(lp->mii_bus);
 859	unregister_netdev(dev);
 860	free_netdev(dev);
 861}
 862
 863static int
 864tc35815_init_queues(struct net_device *dev)
 865{
 866	struct tc35815_local *lp = netdev_priv(dev);
 867	int i;
 868	unsigned long fd_addr;
 869
 870	if (!lp->fd_buf) {
 871		BUG_ON(sizeof(struct FDesc) +
 872		       sizeof(struct BDesc) * RX_BUF_NUM +
 873		       sizeof(struct FDesc) * RX_FD_NUM +
 874		       sizeof(struct TxFD) * TX_FD_NUM >
 875		       PAGE_SIZE * FD_PAGE_NUM);
 876
 877		lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
 878						  PAGE_SIZE * FD_PAGE_NUM,
 879						  &lp->fd_buf_dma);
 880		if (!lp->fd_buf)
 881			return -ENOMEM;
 882		for (i = 0; i < RX_BUF_NUM; i++) {
 883			lp->rx_skbs[i].skb =
 884				alloc_rxbuf_skb(dev, lp->pci_dev,
 885						&lp->rx_skbs[i].skb_dma);
 886			if (!lp->rx_skbs[i].skb) {
 887				while (--i >= 0) {
 888					free_rxbuf_skb(lp->pci_dev,
 889						       lp->rx_skbs[i].skb,
 890						       lp->rx_skbs[i].skb_dma);
 891					lp->rx_skbs[i].skb = NULL;
 892				}
 893				pci_free_consistent(lp->pci_dev,
 894						    PAGE_SIZE * FD_PAGE_NUM,
 895						    lp->fd_buf,
 896						    lp->fd_buf_dma);
 897				lp->fd_buf = NULL;
 898				return -ENOMEM;
 899			}
 900		}
 901		printk(KERN_DEBUG "%s: FD buf %p DataBuf",
 902		       dev->name, lp->fd_buf);
 903		printk("\n");
 904	} else {
 905		for (i = 0; i < FD_PAGE_NUM; i++)
 906			clear_page((void *)((unsigned long)lp->fd_buf +
 907					    i * PAGE_SIZE));
 908	}
 909	fd_addr = (unsigned long)lp->fd_buf;
 910
 911	/* Free Descriptors (for Receive) */
 912	lp->rfd_base = (struct RxFD *)fd_addr;
 913	fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
 914	for (i = 0; i < RX_FD_NUM; i++)
 915		lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
 916	lp->rfd_cur = lp->rfd_base;
 917	lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
 918
 919	/* Transmit Descriptors */
 920	lp->tfd_base = (struct TxFD *)fd_addr;
 921	fd_addr += sizeof(struct TxFD) * TX_FD_NUM;
 922	for (i = 0; i < TX_FD_NUM; i++) {
 923		lp->tfd_base[i].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[i+1]));
 924		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
 925		lp->tfd_base[i].fd.FDCtl = cpu_to_le32(0);
 926	}
 927	lp->tfd_base[TX_FD_NUM-1].fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, &lp->tfd_base[0]));
 928	lp->tfd_start = 0;
 929	lp->tfd_end = 0;
 930
 931	/* Buffer List (for Receive) */
 932	lp->fbl_ptr = (struct FrFD *)fd_addr;
 933	lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
 934	lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
 935	/*
 936	 * move all allocated skbs to head of rx_skbs[] array.
  937	 * fbl_count might not be RX_BUF_NUM if alloc_rxbuf_skb() in
 938	 * tc35815_rx() had failed.
 939	 */
 940	lp->fbl_count = 0;
 941	for (i = 0; i < RX_BUF_NUM; i++) {
 942		if (lp->rx_skbs[i].skb) {
 943			if (i != lp->fbl_count) {
 944				lp->rx_skbs[lp->fbl_count].skb =
 945					lp->rx_skbs[i].skb;
 946				lp->rx_skbs[lp->fbl_count].skb_dma =
 947					lp->rx_skbs[i].skb_dma;
 948			}
 949			lp->fbl_count++;
 950		}
 951	}
 952	for (i = 0; i < RX_BUF_NUM; i++) {
 953		if (i >= lp->fbl_count) {
 954			lp->fbl_ptr->bd[i].BuffData = 0;
 955			lp->fbl_ptr->bd[i].BDCtl = 0;
 956			continue;
 957		}
 958		lp->fbl_ptr->bd[i].BuffData =
 959			cpu_to_le32(lp->rx_skbs[i].skb_dma);
 960		/* BDID is index of FrFD.bd[] */
 961		lp->fbl_ptr->bd[i].BDCtl =
 962			cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
 963				    RX_BUF_SIZE);
 964	}
 965
 966	printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
 967	       dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
 968	return 0;
 969}
 970
 971static void
 972tc35815_clear_queues(struct net_device *dev)
 973{
 974	struct tc35815_local *lp = netdev_priv(dev);
 975	int i;
 976
 977	for (i = 0; i < TX_FD_NUM; i++) {
 978		u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
 979		struct sk_buff *skb =
 980			fdsystem != 0xffffffff ?
 981			lp->tx_skbs[fdsystem].skb : NULL;
 982#ifdef DEBUG
 983		if (lp->tx_skbs[i].skb != skb) {
 984			printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
 985			panic_queues(dev);
 986		}
 987#else
 988		BUG_ON(lp->tx_skbs[i].skb != skb);
 989#endif
 990		if (skb) {
 991			pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
 992			lp->tx_skbs[i].skb = NULL;
 993			lp->tx_skbs[i].skb_dma = 0;
 994			dev_kfree_skb_any(skb);
 995		}
 996		lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
 997	}
 998
 999	tc35815_init_queues(dev);
1000}
1001
1002static void
1003tc35815_free_queues(struct net_device *dev)
1004{
1005	struct tc35815_local *lp = netdev_priv(dev);
1006	int i;
1007
1008	if (lp->tfd_base) {
1009		for (i = 0; i < TX_FD_NUM; i++) {
1010			u32 fdsystem = le32_to_cpu(lp->tfd_base[i].fd.FDSystem);
1011			struct sk_buff *skb =
1012				fdsystem != 0xffffffff ?
1013				lp->tx_skbs[fdsystem].skb : NULL;
1014#ifdef DEBUG
1015			if (lp->tx_skbs[i].skb != skb) {
1016				printk("%s: tx_skbs mismatch(%d).\n", dev->name, i);
1017				panic_queues(dev);
1018			}
1019#else
1020			BUG_ON(lp->tx_skbs[i].skb != skb);
1021#endif
1022			if (skb) {
1023				dev_kfree_skb(skb);
1024				pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
1025				lp->tx_skbs[i].skb = NULL;
1026				lp->tx_skbs[i].skb_dma = 0;
1027			}
1028			lp->tfd_base[i].fd.FDSystem = cpu_to_le32(0xffffffff);
1029		}
1030	}
1031
1032	lp->rfd_base = NULL;
1033	lp->rfd_limit = NULL;
1034	lp->rfd_cur = NULL;
1035	lp->fbl_ptr = NULL;
1036
1037	for (i = 0; i < RX_BUF_NUM; i++) {
1038		if (lp->rx_skbs[i].skb) {
1039			free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1040				       lp->rx_skbs[i].skb_dma);
1041			lp->rx_skbs[i].skb = NULL;
1042		}
1043	}
1044	if (lp->fd_buf) {
1045		pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
1046				    lp->fd_buf, lp->fd_buf_dma);
1047		lp->fd_buf = NULL;
1048	}
1049}
1050
1051static void
1052dump_txfd(struct TxFD *fd)
1053{
1054	printk("TxFD(%p): %08x %08x %08x %08x\n", fd,
1055	       le32_to_cpu(fd->fd.FDNext),
1056	       le32_to_cpu(fd->fd.FDSystem),
1057	       le32_to_cpu(fd->fd.FDStat),
1058	       le32_to_cpu(fd->fd.FDCtl));
1059	printk("BD: ");
1060	printk(" %08x %08x",
1061	       le32_to_cpu(fd->bd.BuffData),
1062	       le32_to_cpu(fd->bd.BDCtl));
1063	printk("\n");
1064}
1065
1066static int
1067dump_rxfd(struct RxFD *fd)
1068{
1069	int i, bd_count = (le32_to_cpu(fd->fd.FDCtl) & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1070	if (bd_count > 8)
1071		bd_count = 8;
1072	printk("RxFD(%p): %08x %08x %08x %08x\n", fd,
1073	       le32_to_cpu(fd->fd.FDNext),
1074	       le32_to_cpu(fd->fd.FDSystem),
1075	       le32_to_cpu(fd->fd.FDStat),
1076	       le32_to_cpu(fd->fd.FDCtl));
1077	if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
1078		return 0;
1079	printk("BD: ");
1080	for (i = 0; i < bd_count; i++)
1081		printk(" %08x %08x",
1082		       le32_to_cpu(fd->bd[i].BuffData),
1083		       le32_to_cpu(fd->bd[i].BDCtl));
1084	printk("\n");
1085	return bd_count;
1086}
1087
1088#ifdef DEBUG
1089static void
1090dump_frfd(struct FrFD *fd)
1091{
1092	int i;
1093	printk("FrFD(%p): %08x %08x %08x %08x\n", fd,
1094	       le32_to_cpu(fd->fd.FDNext),
1095	       le32_to_cpu(fd->fd.FDSystem),
1096	       le32_to_cpu(fd->fd.FDStat),
1097	       le32_to_cpu(fd->fd.FDCtl));
1098	printk("BD: ");
1099	for (i = 0; i < RX_BUF_NUM; i++)
1100		printk(" %08x %08x",
1101		       le32_to_cpu(fd->bd[i].BuffData),
1102		       le32_to_cpu(fd->bd[i].BDCtl));
1103	printk("\n");
1104}
1105
1106static void
1107panic_queues(struct net_device *dev)
1108{
1109	struct tc35815_local *lp = netdev_priv(dev);
1110	int i;
1111
1112	printk("TxFD base %p, start %u, end %u\n",
1113	       lp->tfd_base, lp->tfd_start, lp->tfd_end);
1114	printk("RxFD base %p limit %p cur %p\n",
1115	       lp->rfd_base, lp->rfd_limit, lp->rfd_cur);
1116	printk("FrFD %p\n", lp->fbl_ptr);
1117	for (i = 0; i < TX_FD_NUM; i++)
1118		dump_txfd(&lp->tfd_base[i]);
1119	for (i = 0; i < RX_FD_NUM; i++) {
1120		int bd_count = dump_rxfd(&lp->rfd_base[i]);
1121		i += (bd_count + 1) / 2;	/* skip BDs */
1122	}
1123	dump_frfd(lp->fbl_ptr);
1124	panic("%s: Illegal queue state.", dev->name);
1125}
1126#endif
1127
1128static void print_eth(const u8 *add)
1129{
1130	printk(KERN_DEBUG "print_eth(%p)\n", add);
1131	printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
1132		add + 6, add, add[12], add[13]);
1133}
1134
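/*
 * The Tx ring keeps one slot deliberately unused, so tfd_start ==
 * tfd_end always means "empty" and the full condition is "advancing
 * tfd_start would collide with tfd_end".
 */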
1135static int tc35815_tx_full(struct net_device *dev)
1136{
1137	struct tc35815_local *lp = netdev_priv(dev);
1138	return (lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end;
1139}
1140
1141static void tc35815_restart(struct net_device *dev)
1142{
1143	struct tc35815_local *lp = netdev_priv(dev);
1144	int ret;
1145
1146	if (lp->phy_dev) {
1147		ret = phy_init_hw(lp->phy_dev);
1148		if (ret)
1149			printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
1150	}
1151
1152	spin_lock_bh(&lp->rx_lock);
1153	spin_lock_irq(&lp->lock);
1154	tc35815_chip_reset(dev);
1155	tc35815_clear_queues(dev);
1156	tc35815_chip_init(dev);
 1157	/* Reconfigure CAM again since tc35815_chip_init() initializes it. */
1158	tc35815_set_multicast_list(dev);
1159	spin_unlock_irq(&lp->lock);
1160	spin_unlock_bh(&lp->rx_lock);
1161
1162	netif_wake_queue(dev);
1163}
1164
1165static void tc35815_restart_work(struct work_struct *work)
1166{
1167	struct tc35815_local *lp =
1168		container_of(work, struct tc35815_local, restart_work);
1169	struct net_device *dev = lp->dev;
1170
1171	tc35815_restart(dev);
1172}
1173
1174static void tc35815_schedule_restart(struct net_device *dev)
1175{
1176	struct tc35815_local *lp = netdev_priv(dev);
1177	struct tc35815_regs __iomem *tr =
1178		(struct tc35815_regs __iomem *)dev->base_addr;
1179	unsigned long flags;
1180
1181	/* disable interrupts */
1182	spin_lock_irqsave(&lp->lock, flags);
1183	tc_writel(0, &tr->Int_En);
1184	tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1185	schedule_work(&lp->restart_work);
1186	spin_unlock_irqrestore(&lp->lock, flags);
1187}
1188
1189static void tc35815_tx_timeout(struct net_device *dev)
1190{
1191	struct tc35815_regs __iomem *tr =
1192		(struct tc35815_regs __iomem *)dev->base_addr;
1193
1194	printk(KERN_WARNING "%s: transmit timed out, status %#x\n",
1195	       dev->name, tc_readl(&tr->Tx_Stat));
1196
1197	/* Try to restart the adaptor. */
1198	tc35815_schedule_restart(dev);
1199	dev->stats.tx_errors++;
1200}
1201
1202/*
1203 * Open/initialize the controller. This is called (in the current kernel)
1204 * sometime after booting when the 'ifconfig' program is run.
1205 *
1206 * This routine should set everything up anew at each open, even
1207 * registers that "should" only need to be set once at boot, so that
 1208	 * there is a non-reboot way to recover if something goes wrong.
1209 */
1210static int
1211tc35815_open(struct net_device *dev)
1212{
1213	struct tc35815_local *lp = netdev_priv(dev);
1214
1215	/*
 1216	 * This is used if the interrupt line can be turned off (shared).
1217	 * See 3c503.c for an example of selecting the IRQ at config-time.
1218	 */
1219	if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
1220			dev->name, dev))
1221		return -EAGAIN;
1222
1223	tc35815_chip_reset(dev);
1224
1225	if (tc35815_init_queues(dev) != 0) {
1226		free_irq(dev->irq, dev);
1227		return -EAGAIN;
1228	}
1229
1230	napi_enable(&lp->napi);
1231
1232	/* Reset the hardware here. Don't forget to set the station address. */
1233	spin_lock_irq(&lp->lock);
1234	tc35815_chip_init(dev);
1235	spin_unlock_irq(&lp->lock);
1236
1237	netif_carrier_off(dev);
1238	/* schedule a link state check */
1239	phy_start(lp->phy_dev);
1240
 1241	/* We are now ready to accept transmit requests from
 1242	 * the queueing layer of the networking stack.
1243	 */
1244	netif_start_queue(dev);
1245
1246	return 0;
1247}
1248
1249/* This will only be invoked if your driver is _not_ in XOFF state.
1250 * What this means is that you need not check it, and that this
1251 * invariant will hold if you make sure that the netif_*_queue()
1252 * calls are done at the proper times.
1253 */
1254static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1255{
1256	struct tc35815_local *lp = netdev_priv(dev);
1257	struct TxFD *txfd;
1258	unsigned long flags;
1259
1260	/* If some error occurs while trying to transmit this
 1261	 * packet, you should return NETDEV_TX_BUSY from this function.
1262	 * In such a case you _may not_ do anything to the
1263	 * SKB, it is still owned by the network queueing
1264	 * layer when an error is returned.  This means you
1265	 * may not modify any SKB fields, you may not free
1266	 * the SKB, etc.
1267	 */
1268
1269	/* This is the most common case for modern hardware.
1270	 * The spinlock protects this code from the TX complete
1271	 * hardware interrupt handler.  Queue flow control is
1272	 * thus managed under this lock as well.
1273	 */
1274	spin_lock_irqsave(&lp->lock, flags);
1275
1276	/* failsafe... (handle txdone now if half of FDs are used) */
1277	if ((lp->tfd_start + TX_FD_NUM - lp->tfd_end) % TX_FD_NUM >
1278	    TX_FD_NUM / 2)
1279		tc35815_txdone(dev);
1280
1281	if (netif_msg_pktdata(lp))
1282		print_eth(skb->data);
1283#ifdef DEBUG
1284	if (lp->tx_skbs[lp->tfd_start].skb) {
1285		printk("%s: tx_skbs conflict.\n", dev->name);
1286		panic_queues(dev);
1287	}
1288#else
1289	BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
1290#endif
1291	lp->tx_skbs[lp->tfd_start].skb = skb;
1292	lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1293
 1294	/* add to ring */
1295	txfd = &lp->tfd_base[lp->tfd_start];
1296	txfd->bd.BuffData = cpu_to_le32(lp->tx_skbs[lp->tfd_start].skb_dma);
1297	txfd->bd.BDCtl = cpu_to_le32(skb->len);
1298	txfd->fd.FDSystem = cpu_to_le32(lp->tfd_start);
1299	txfd->fd.FDCtl = cpu_to_le32(FD_CownsFD | (1 << FD_BDCnt_SHIFT));
1300
1301	if (lp->tfd_start == lp->tfd_end) {
1302		struct tc35815_regs __iomem *tr =
1303			(struct tc35815_regs __iomem *)dev->base_addr;
1304		/* Start DMA Transmitter. */
1305		txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1306		txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1307		if (netif_msg_tx_queued(lp)) {
1308			printk("%s: starting TxFD.\n", dev->name);
1309			dump_txfd(txfd);
1310		}
1311		tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
1312	} else {
1313		txfd->fd.FDNext &= cpu_to_le32(~FD_Next_EOL);
1314		if (netif_msg_tx_queued(lp)) {
1315			printk("%s: queueing TxFD.\n", dev->name);
1316			dump_txfd(txfd);
1317		}
1318	}
1319	lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
1320
1321	/* If we just used up the very last entry in the
1322	 * TX ring on this device, tell the queueing
1323	 * layer to send no more.
1324	 */
1325	if (tc35815_tx_full(dev)) {
1326		if (netif_msg_tx_queued(lp))
1327			printk(KERN_WARNING "%s: TxFD Exhausted.\n", dev->name);
1328		netif_stop_queue(dev);
1329	}
1330
 1331	/* Transmit statistics are updated when the TX
 1332	 * completion hardware interrupt arrives.
 1333	 */
1334
1335	spin_unlock_irqrestore(&lp->lock, flags);
1336	return NETDEV_TX_OK;
1337}
1338
1339#define FATAL_ERROR_INT \
1340	(Int_IntPCI | Int_DmParErr | Int_IntNRAbt)
1341static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1342{
1343	static int count;
 1344	printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
1345	       dev->name, status);
1346	if (status & Int_IntPCI)
1347		printk(" IntPCI");
1348	if (status & Int_DmParErr)
1349		printk(" DmParErr");
1350	if (status & Int_IntNRAbt)
1351		printk(" IntNRAbt");
1352	printk("\n");
1353	if (count++ > 100)
1354		panic("%s: Too many fatal errors.", dev->name);
1355	printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1356	/* Try to restart the adaptor. */
1357	tc35815_schedule_restart(dev);
1358}
1359
1360static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1361{
1362	struct tc35815_local *lp = netdev_priv(dev);
1363	int ret = -1;
1364
1365	/* Fatal errors... */
1366	if (status & FATAL_ERROR_INT) {
1367		tc35815_fatal_error_interrupt(dev, status);
1368		return 0;
1369	}
1370	/* recoverable errors */
1371	if (status & Int_IntFDAEx) {
1372		if (netif_msg_rx_err(lp))
1373			dev_warn(&dev->dev,
1374				 "Free Descriptor Area Exhausted (%#x).\n",
1375				 status);
1376		dev->stats.rx_dropped++;
1377		ret = 0;
1378	}
1379	if (status & Int_IntBLEx) {
1380		if (netif_msg_rx_err(lp))
1381			dev_warn(&dev->dev,
1382				 "Buffer List Exhausted (%#x).\n",
1383				 status);
1384		dev->stats.rx_dropped++;
1385		ret = 0;
1386	}
1387	if (status & Int_IntExBD) {
1388		if (netif_msg_rx_err(lp))
1389			dev_warn(&dev->dev,
 1390				 "Excessive Buffer Descriptors (%#x).\n",
1391				 status);
1392		dev->stats.rx_length_errors++;
1393		ret = 0;
1394	}
1395
1396	/* normal notification */
1397	if (status & Int_IntMacRx) {
1398		/* Got a packet(s). */
1399		ret = tc35815_rx(dev, limit);
1400		lp->lstats.rx_ints++;
1401	}
1402	if (status & Int_IntMacTx) {
1403		/* Transmit complete. */
1404		lp->lstats.tx_ints++;
1405		spin_lock_irq(&lp->lock);
1406		tc35815_txdone(dev);
1407		spin_unlock_irq(&lp->lock);
1408		if (ret < 0)
1409			ret = 0;
1410	}
1411	return ret;
1412}
1413
1414/*
1415 * The typical workload of the driver:
1416 * Handle the network interface interrupts.
1417 */
1418static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1419{
1420	struct net_device *dev = dev_id;
1421	struct tc35815_local *lp = netdev_priv(dev);
1422	struct tc35815_regs __iomem *tr =
1423		(struct tc35815_regs __iomem *)dev->base_addr;
1424	u32 dmactl = tc_readl(&tr->DMA_Ctl);
1425
1426	if (!(dmactl & DMA_IntMask)) {
1427		/* disable interrupts */
1428		tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1429		if (napi_schedule_prep(&lp->napi))
1430			__napi_schedule(&lp->napi);
1431		else {
1432			printk(KERN_ERR "%s: interrupt taken in poll\n",
1433			       dev->name);
1434			BUG();
1435		}
1436		(void)tc_readl(&tr->Int_Src);	/* flush */
1437		return IRQ_HANDLED;
1438	}
1439	return IRQ_NONE;
1440}
1441
1442#ifdef CONFIG_NET_POLL_CONTROLLER
1443static void tc35815_poll_controller(struct net_device *dev)
1444{
1445	disable_irq(dev->irq);
1446	tc35815_interrupt(dev->irq, dev);
1447	enable_irq(dev->irq);
1448}
1449#endif
1450
1451/* We have a good packet(s), get it/them out of the buffers. */
1452static int
1453tc35815_rx(struct net_device *dev, int limit)
1454{
1455	struct tc35815_local *lp = netdev_priv(dev);
1456	unsigned int fdctl;
1457	int i;
1458	int received = 0;
1459
1460	while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1461		int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
1462		int pkt_len = fdctl & FD_FDLength_MASK;
1463		int bd_count = (fdctl & FD_BDCnt_MASK) >> FD_BDCnt_SHIFT;
1464#ifdef DEBUG
1465		struct RxFD *next_rfd;
1466#endif
1467#if (RX_CTL_CMD & Rx_StripCRC) == 0
1468		pkt_len -= ETH_FCS_LEN;
1469#endif
1470
1471		if (netif_msg_rx_status(lp))
1472			dump_rxfd(lp->rfd_cur);
1473		if (status & Rx_Good) {
1474			struct sk_buff *skb;
1475			unsigned char *data;
1476			int cur_bd;
1477
1478			if (--limit < 0)
1479				break;
1480			BUG_ON(bd_count > 1);
1481			cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1482				  & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1483#ifdef DEBUG
1484			if (cur_bd >= RX_BUF_NUM) {
1485				printk("%s: invalid BDID.\n", dev->name);
1486				panic_queues(dev);
1487			}
1488			BUG_ON(lp->rx_skbs[cur_bd].skb_dma !=
1489			       (le32_to_cpu(lp->rfd_cur->bd[0].BuffData) & ~3));
1490			if (!lp->rx_skbs[cur_bd].skb) {
1491				printk("%s: NULL skb.\n", dev->name);
1492				panic_queues(dev);
1493			}
1494#else
1495			BUG_ON(cur_bd >= RX_BUF_NUM);
1496#endif
1497			skb = lp->rx_skbs[cur_bd].skb;
1498			prefetch(skb->data);
1499			lp->rx_skbs[cur_bd].skb = NULL;
1500			pci_unmap_single(lp->pci_dev,
1501					 lp->rx_skbs[cur_bd].skb_dma,
1502					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1503			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
1504				memmove(skb->data, skb->data - NET_IP_ALIGN,
1505					pkt_len);
1506			data = skb_put(skb, pkt_len);
1507			if (netif_msg_pktdata(lp))
1508				print_eth(data);
1509			skb->protocol = eth_type_trans(skb, dev);
1510			netif_receive_skb(skb);
1511			received++;
1512			dev->stats.rx_packets++;
1513			dev->stats.rx_bytes += pkt_len;
1514		} else {
1515			dev->stats.rx_errors++;
1516			if (netif_msg_rx_err(lp))
1517				dev_info(&dev->dev, "Rx error (status %x)\n",
1518					 status & Rx_Stat_Mask);
 1519			/* WORKAROUND: LongErr and CRCErr mean Overflow. */
1520			if ((status & Rx_LongErr) && (status & Rx_CRCErr)) {
1521				status &= ~(Rx_LongErr|Rx_CRCErr);
1522				status |= Rx_Over;
1523			}
1524			if (status & Rx_LongErr)
1525				dev->stats.rx_length_errors++;
1526			if (status & Rx_Over)
1527				dev->stats.rx_fifo_errors++;
1528			if (status & Rx_CRCErr)
1529				dev->stats.rx_crc_errors++;
1530			if (status & Rx_Align)
1531				dev->stats.rx_frame_errors++;
1532		}
1533
1534		if (bd_count > 0) {
1535			/* put Free Buffer back to controller */
1536			int bdctl = le32_to_cpu(lp->rfd_cur->bd[bd_count - 1].BDCtl);
1537			unsigned char id =
1538				(bdctl & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
1539#ifdef DEBUG
1540			if (id >= RX_BUF_NUM) {
1541				printk("%s: invalid BDID.\n", dev->name);
1542				panic_queues(dev);
1543			}
1544#else
1545			BUG_ON(id >= RX_BUF_NUM);
1546#endif
1547			/* free old buffers */
1548			lp->fbl_count--;
 1549			while (lp->fbl_count < RX_BUF_NUM) {
1551				unsigned char curid =
1552					(id + 1 + lp->fbl_count) % RX_BUF_NUM;
1553				struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1554#ifdef DEBUG
1555				bdctl = le32_to_cpu(bd->BDCtl);
1556				if (bdctl & BD_CownsBD) {
1557					printk("%s: Freeing invalid BD.\n",
1558					       dev->name);
1559					panic_queues(dev);
1560				}
1561#endif
1562				/* pass BD to controller */
1563				if (!lp->rx_skbs[curid].skb) {
1564					lp->rx_skbs[curid].skb =
1565						alloc_rxbuf_skb(dev,
1566								lp->pci_dev,
1567								&lp->rx_skbs[curid].skb_dma);
1568					if (!lp->rx_skbs[curid].skb)
1569						break; /* try on next reception */
1570					bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1571				}
1572				/* Note: BDLength was modified by chip. */
1573				bd->BDCtl = cpu_to_le32(BD_CownsBD |
1574							(curid << BD_RxBDID_SHIFT) |
1575							RX_BUF_SIZE);
1576				lp->fbl_count++;
1577			}
1578		}
1579
1580		/* put RxFD back to controller */
1581#ifdef DEBUG
1582		next_rfd = fd_bus_to_virt(lp,
1583					  le32_to_cpu(lp->rfd_cur->fd.FDNext));
1584		if (next_rfd < lp->rfd_base || next_rfd > lp->rfd_limit) {
1585			printk("%s: RxFD FDNext invalid.\n", dev->name);
1586			panic_queues(dev);
1587		}
1588#endif
1589		for (i = 0; i < (bd_count + 1) / 2 + 1; i++) {
1590			/* pass FD to controller */
1591#ifdef DEBUG
1592			lp->rfd_cur->fd.FDNext = cpu_to_le32(0xdeaddead);
1593#else
1594			lp->rfd_cur->fd.FDNext = cpu_to_le32(FD_Next_EOL);
1595#endif
1596			lp->rfd_cur->fd.FDCtl = cpu_to_le32(FD_CownsFD);
1597			lp->rfd_cur++;
1598		}
1599		if (lp->rfd_cur > lp->rfd_limit)
1600			lp->rfd_cur = lp->rfd_base;
1601#ifdef DEBUG
1602		if (lp->rfd_cur != next_rfd)
1603			printk("rfd_cur = %p, next_rfd %p\n",
1604			       lp->rfd_cur, next_rfd);
1605#endif
1606	}
1607
1608	return received;
1609}
1610
1611static int tc35815_poll(struct napi_struct *napi, int budget)
1612{
1613	struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
1614	struct net_device *dev = lp->dev;
1615	struct tc35815_regs __iomem *tr =
1616		(struct tc35815_regs __iomem *)dev->base_addr;
1617	int received = 0, handled;
1618	u32 status;
1619
1620	if (budget <= 0)
1621		return received;
1622
1623	spin_lock(&lp->rx_lock);
1624	status = tc_readl(&tr->Int_Src);
1625	do {
1626		/* BLEx, FDAEx will be cleared later */
1627		tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1628			  &tr->Int_Src);	/* write to clear */
1629
1630		handled = tc35815_do_interrupt(dev, status, budget - received);
1631		if (status & (Int_BLEx | Int_FDAEx))
1632			tc_writel(status & (Int_BLEx | Int_FDAEx),
1633				  &tr->Int_Src);
1634		if (handled >= 0) {
1635			received += handled;
1636			if (received >= budget)
1637				break;
1638		}
1639		status = tc_readl(&tr->Int_Src);
1640	} while (status);
1641	spin_unlock(&lp->rx_lock);
1642
1643	if (received < budget) {
1644		napi_complete(napi);
1645		/* enable interrupts */
1646		tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1647	}
1648	return received;
1649}
1650
1651#define TX_STA_ERR	(Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1652
static void
tc35815_check_tx_stat(struct net_device *dev, int status)
{
	struct tc35815_local *lp = netdev_priv(dev);
	const char *msg = NULL;

	/* count collisions */
	if (status & Tx_ExColl)
		dev->stats.collisions += 16;
	if (status & Tx_TxColl_MASK)
		dev->stats.collisions += status & Tx_TxColl_MASK;

	/* TX4939 does not have NCarr */
	if (lp->chiptype == TC35815_TX4939)
		status &= ~Tx_NCarr;
	/* WORKAROUND: ignore LostCrS in full-duplex operation */
	if (!lp->link || lp->duplex == DUPLEX_FULL)
		status &= ~Tx_NCarr;

	if (!(status & TX_STA_ERR)) {
		/* no error */
		dev->stats.tx_packets++;
		return;
	}

	dev->stats.tx_errors++;
	if (status & Tx_ExColl) {
		dev->stats.tx_aborted_errors++;
		msg = "Excessive Collision.";
	}
	if (status & Tx_Under) {
		dev->stats.tx_fifo_errors++;
		msg = "Tx FIFO Underrun.";
		if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
			lp->lstats.tx_underrun++;
			if (lp->lstats.tx_underrun >= TX_THRESHOLD_KEEP_LIMIT) {
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				tc_writel(TX_THRESHOLD_MAX, &tr->TxThrsh);
				msg = "Tx FIFO Underrun. Changed Tx threshold to max.";
			}
		}
	}
	if (status & Tx_Defer) {
		dev->stats.tx_fifo_errors++;
		msg = "Excessive Deferral.";
	}
	if (status & Tx_NCarr) {
		dev->stats.tx_carrier_errors++;
		msg = "Lost Carrier Sense.";
	}
	if (status & Tx_LateColl) {
		dev->stats.tx_aborted_errors++;
		msg = "Late Collision.";
	}
	if (status & Tx_TxPar) {
		dev->stats.tx_fifo_errors++;
		msg = "Transmit Parity Error.";
	}
	if (status & Tx_SQErr) {
		dev->stats.tx_heartbeat_errors++;
		msg = "Signal Quality Error.";
	}
	if (msg && netif_msg_tx_err(lp))
		printk(KERN_WARNING "%s: %s (%#x)\n", dev->name, msg, status);
}

/* This handles TX complete events posted by the device
 * via interrupts.
 */
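/* The ring runs from tfd_end (oldest in-flight descriptor) to tfd_start
 * (next free slot).  Descriptors are reaped until one still owned by the
 * controller (FD_CownsFD set) is found, unmapping and freeing each skb.
 * If the chain ended in FD_Next_EOL while more frames remain queued, the
 * DMA transmitter is kicked again via TxFrmPtr.
 */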
static void
tc35815_txdone(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct TxFD *txfd;
	unsigned int fdctl;

	txfd = &lp->tfd_base[lp->tfd_end];
	while (lp->tfd_start != lp->tfd_end &&
	       !((fdctl = le32_to_cpu(txfd->fd.FDCtl)) & FD_CownsFD)) {
		int status = le32_to_cpu(txfd->fd.FDStat);
		struct sk_buff *skb;
		unsigned long fdnext = le32_to_cpu(txfd->fd.FDNext);
		u32 fdsystem = le32_to_cpu(txfd->fd.FDSystem);

		if (netif_msg_tx_done(lp)) {
			printk("%s: complete TxFD.\n", dev->name);
			dump_txfd(txfd);
		}
		tc35815_check_tx_stat(dev, status);

		skb = fdsystem != 0xffffffff ?
			lp->tx_skbs[fdsystem].skb : NULL;
#ifdef DEBUG
		if (lp->tx_skbs[lp->tfd_end].skb != skb) {
			printk("%s: tx_skbs mismatch.\n", dev->name);
			panic_queues(dev);
		}
#else
		BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
#endif
		if (skb) {
			dev->stats.tx_bytes += skb->len;
			pci_unmap_single(lp->pci_dev,
					 lp->tx_skbs[lp->tfd_end].skb_dma,
					 skb->len, PCI_DMA_TODEVICE);
			lp->tx_skbs[lp->tfd_end].skb = NULL;
			lp->tx_skbs[lp->tfd_end].skb_dma = 0;
			dev_kfree_skb_any(skb);
		}
		txfd->fd.FDSystem = cpu_to_le32(0xffffffff);

		lp->tfd_end = (lp->tfd_end + 1) % TX_FD_NUM;
		txfd = &lp->tfd_base[lp->tfd_end];
#ifdef DEBUG
		if ((fdnext & ~FD_Next_EOL) != fd_virt_to_bus(lp, txfd)) {
			printk("%s: TxFD FDNext invalid.\n", dev->name);
			panic_queues(dev);
		}
#endif
		if (fdnext & FD_Next_EOL) {
			/* The DMA transmitter has stopped at end-of-list;
			 * restart it if frames are still queued. */
			if (lp->tfd_end != lp->tfd_start) {
				struct tc35815_regs __iomem *tr =
					(struct tc35815_regs __iomem *)dev->base_addr;
				int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
				struct TxFD *txhead = &lp->tfd_base[head];
				int qlen = (lp->tfd_start + TX_FD_NUM
					    - lp->tfd_end) % TX_FD_NUM;

#ifdef DEBUG
				if (!(le32_to_cpu(txfd->fd.FDCtl) & FD_CownsFD)) {
					printk("%s: TxFD FDCtl invalid.\n", dev->name);
					panic_queues(dev);
				}
#endif
				/* log max queue length */
				if (lp->lstats.max_tx_qlen < qlen)
					lp->lstats.max_tx_qlen = qlen;

				/* start the DMA transmitter again */
				txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
				txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
				if (netif_msg_tx_queued(lp)) {
					printk("%s: start TxFD on queue.\n",
					       dev->name);
					dump_txfd(txfd);
				}
				tc_writel(fd_virt_to_bus(lp, txfd), &tr->TxFrmPtr);
			}
			break;
		}
	}

	/* If we had stopped the queue due to a "tx full"
	 * condition, and space has now been made available,
	 * wake up the queue.
	 */
	if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
		netif_wake_queue(dev);
}

/* The inverse routine to tc35815_open(). */
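/* Stops the transmit queue and NAPI, halts the PHY, and cancels the
 * deferred restart work before resetting the chip and releasing the IRQ
 * and descriptor queues.
 */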
static int
tc35815_close(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&lp->napi);
	if (lp->phy_dev)
		phy_stop(lp->phy_dev);
	cancel_work_sync(&lp->restart_work);

	/* Flush the Tx and disable Rx here. */
	tc35815_chip_reset(dev);
	free_irq(dev->irq, dev);

	tc35815_free_queues(dev);

	return 0;
}

/*
 * Get the current statistics.
 * This may be called with the card open or closed.
 */
static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	if (netif_running(dev))
		/* Update the statistics from the device registers. */
		dev->stats.rx_missed_errors += tc_readl(&tr->Miss_Cnt);

	return &dev->stats;
}

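/* Program one 6-byte station address into the CAM.  The CAM is addressed
 * as 32-bit words through CAM_Adr/CAM_Data, so entries alternate between
 * two alignments: even entries start on a word boundary, odd entries start
 * mid-word, hence the read-modify-write of the shared words.
 *
 * CAM word layout for entries 0 and 1 (a0..a5 = entry-0 bytes,
 * b0..b5 = entry-1 bytes):
 *	word 0:	a0 a1 a2 a3
 *	word 1:	a4 a5 b0 b1
 *	word 2:	b2 b3 b4 b5
 */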
static void tc35815_set_cam_entry(struct net_device *dev, int index,
				  unsigned char *addr)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int cam_index = index * 6;
	u32 cam_data;
	u32 saved_addr;

	saved_addr = tc_readl(&tr->CAM_Adr);

	if (netif_msg_hw(lp))
		printk(KERN_DEBUG "%s: CAM %d: %pM\n",
		       dev->name, index, addr);
	if (index & 1) {
		/* read modify write */
		tc_writel(cam_index - 2, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0xffff0000;
		cam_data |= addr[0] << 8 | addr[1];
		tc_writel(cam_data, &tr->CAM_Data);
		/* write whole word */
		tc_writel(cam_index + 2, &tr->CAM_Adr);
		cam_data = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
		tc_writel(cam_data, &tr->CAM_Data);
	} else {
		/* write whole word */
		tc_writel(cam_index, &tr->CAM_Adr);
		cam_data = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
		tc_writel(cam_data, &tr->CAM_Data);
		/* read modify write */
		tc_writel(cam_index + 4, &tr->CAM_Adr);
		cam_data = tc_readl(&tr->CAM_Data) & 0x0000ffff;
		cam_data |= addr[4] << 24 | (addr[5] << 16);
		tc_writel(cam_data, &tr->CAM_Data);
	}

	tc_writel(saved_addr, &tr->CAM_Adr);
}

/*
 * Set or clear the multicast filter for this adaptor.
 * IFF_PROMISC		receive all packets (honoured only once the link
 *			is up; see the workaround below)
 * IFF_ALLMULTI, or more multicast addresses than the CAM can hold:
 *			receive all multicast and normal packets
 * otherwise		load the CAM with up to CAM_ENTRY_MAX - 3 multicast
 *			addresses and do exact filtering
 */
static void
tc35815_set_multicast_list(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;

	if (dev->flags & IFF_PROMISC) {
		/* With some (all?) 100Mb/s half-duplex hubs, the controller
		 * hangs if promiscuous mode is enabled before link-up, so
		 * defer it until the link is up. */
		struct tc35815_local *lp = netdev_priv(dev);

		if (!lp->link)
			return;
		/* Enable promiscuous mode */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
		/* CAM entries 0, 1 and 20 are reserved. */
		/* Disable promiscuous mode, use normal mode. */
		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		int i;
		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);

		tc_writel(0, &tr->CAM_Ctl);
		/* Walk the address list, and load the filter */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* entries 0 and 1 are reserved. */
			tc35815_set_cam_entry(dev, i + 2, ha->addr);
			ena_bits |= CAM_Ena_Bit(i + 2);
			i++;
		}
		tc_writel(ena_bits, &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	} else {
		tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
		tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
	}
}

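/* ethtool support.  Link settings are delegated to the PHY layer; the
 * driver-private counters exposed below come from the lstats member of
 * struct tc35815_local.
 */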
static void tc35815_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct tc35815_local *lp = netdev_priv(dev);

	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}

static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *lp = netdev_priv(dev);

	if (!lp->phy_dev)
		return -ENODEV;
	return phy_ethtool_gset(lp->phy_dev, cmd);
}

static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tc35815_local *lp = netdev_priv(dev);

	if (!lp->phy_dev)
		return -ENODEV;
	return phy_ethtool_sset(lp->phy_dev, cmd);
}

static u32 tc35815_get_msglevel(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	return lp->msg_enable;
}

static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
{
	struct tc35815_local *lp = netdev_priv(dev);

	lp->msg_enable = datum;
}

static int tc35815_get_sset_count(struct net_device *dev, int sset)
{
	struct tc35815_local *lp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return sizeof(lp->lstats) / sizeof(int);
	default:
		return -EOPNOTSUPP;
	}
}

static void tc35815_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct tc35815_local *lp = netdev_priv(dev);

	data[0] = lp->lstats.max_tx_qlen;
	data[1] = lp->lstats.tx_ints;
	data[2] = lp->lstats.rx_ints;
	data[3] = lp->lstats.tx_underrun;
}

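/* Labels for the driver-private statistics, in the same order as the
 * values filled in by tc35815_get_ethtool_stats() above.
 */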
static struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "max_tx_qlen" },
	{ "tx_ints" },
	{ "rx_ints" },
	{ "tx_underrun" },
};

static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys));
}

static const struct ethtool_ops tc35815_ethtool_ops = {
	.get_drvinfo		= tc35815_get_drvinfo,
	.get_settings		= tc35815_get_settings,
	.set_settings		= tc35815_set_settings,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= tc35815_get_msglevel,
	.set_msglevel		= tc35815_set_msglevel,
	.get_strings		= tc35815_get_strings,
	.get_sset_count		= tc35815_get_sset_count,
	.get_ethtool_stats	= tc35815_get_ethtool_stats,
};

static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tc35815_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	if (!lp->phy_dev)
		return -ENODEV;
	return phy_mii_ioctl(lp->phy_dev, rq, cmd);
}

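/* Soft-reset the controller: assert MAC_Reset and poll (up to ~100 ms) for
 * it to self-clear, bring every register back to a known state, and zero
 * the 4 KB of internal SRAM through the CAM window while DMA_TestMode is
 * set.  Int_Src is write-1-to-clear and Miss_Cnt clears on read.
 */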
static void tc35815_chip_reset(struct net_device *dev)
{
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	int i;

	/* reset the controller */
	tc_writel(MAC_Reset, &tr->MAC_Ctl);
	udelay(4);	/* at least 3200 ns */
	i = 0;
	while (tc_readl(&tr->MAC_Ctl) & MAC_Reset) {
		if (i++ > 100) {
			printk(KERN_ERR "%s: MAC reset failed.\n", dev->name);
			break;
		}
		mdelay(1);
	}
	tc_writel(0, &tr->MAC_Ctl);

	/* initialize registers to default values */
	tc_writel(0, &tr->DMA_Ctl);
	tc_writel(0, &tr->TxThrsh);
	tc_writel(0, &tr->TxPollCtr);
	tc_writel(0, &tr->RxFragSize);
	tc_writel(0, &tr->Int_En);
	tc_writel(0, &tr->FDA_Bas);
	tc_writel(0, &tr->FDA_Lim);
	tc_writel(0xffffffff, &tr->Int_Src);	/* Write 1 to clear */
	tc_writel(0, &tr->CAM_Ctl);
	tc_writel(0, &tr->Tx_Ctl);
	tc_writel(0, &tr->Rx_Ctl);
	tc_writel(0, &tr->CAM_Ena);
	(void)tc_readl(&tr->Miss_Cnt);	/* Read to clear */

	/* initialize internal SRAM */
	tc_writel(DMA_TestMode, &tr->DMA_Ctl);
	for (i = 0; i < 0x1000; i += 4) {
		tc_writel(i, &tr->CAM_Adr);
		tc_writel(0, &tr->CAM_Data);
	}
	tc_writel(0, &tr->DMA_Ctl);
}

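/* Bring the freshly reset controller up: load the station address into the
 * CAM, program the DMA and interrupt settings, point the chip at the
 * receive descriptor queues, and finally start the receive and transmit
 * circuits.
 */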
static void tc35815_chip_init(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct tc35815_regs __iomem *tr =
		(struct tc35815_regs __iomem *)dev->base_addr;
	unsigned long txctl = TX_CTL_CMD;

	/* load station address to CAM */
	tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);

	/* Enable CAM (broadcast and unicast) */
	tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
	tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);

	/* Use DMA_RxAlign_2 to make IP header 4-byte aligned. */
	if (HAVE_DMA_RXALIGN(lp))
		tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
	else
		tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
	tc_writel(0, &tr->TxPollCtr);	/* Batch mode */
	tc_writel(TX_THRESHOLD, &tr->TxThrsh);
	tc_writel(INT_EN_CMD, &tr->Int_En);

	/* set queues */
	tc_writel(fd_virt_to_bus(lp, lp->rfd_base), &tr->FDA_Bas);
	tc_writel((unsigned long)lp->rfd_limit - (unsigned long)lp->rfd_base,
		  &tr->FDA_Lim);
	/*
	 * Activation method:
	 * First, enable the MAC Transmitter and the DMA Receive circuits.
	 * Then enable the DMA Transmitter and the MAC Receive circuits.
	 */
	tc_writel(fd_virt_to_bus(lp, lp->fbl_ptr), &tr->BLFrmPtr);	/* start DMA receiver */
	tc_writel(RX_CTL_CMD, &tr->Rx_Ctl);	/* start MAC receiver */

	/* start MAC transmitter */
	/* TX4939 does not have EnLCarr */
	if (lp->chiptype == TC35815_TX4939)
		txctl &= ~Tx_EnLCarr;
	/* WORKAROUND: ignore LostCrS in full-duplex operation */
	if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
		txctl &= ~Tx_EnLCarr;
	tc_writel(txctl, &tr->Tx_Ctl);
}

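/* Legacy PCI power-management hooks: suspend detaches the interface, stops
 * the PHY and resets the chip before entering D3hot; resume restores PCI
 * state, restarts the whole controller and re-attaches the interface.
 */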
#ifdef CONFIG_PM
static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);
	unsigned long flags;

	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;
	netif_device_detach(dev);
	if (lp->phy_dev)
		phy_stop(lp->phy_dev);
	spin_lock_irqsave(&lp->lock, flags);
	tc35815_chip_reset(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int tc35815_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tc35815_local *lp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;
	pci_set_power_state(pdev, PCI_D0);
	tc35815_restart(dev);
	netif_carrier_off(dev);
	if (lp->phy_dev)
		phy_start(lp->phy_dev);
	netif_device_attach(dev);
	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver tc35815_pci_driver = {
	.name		= MODNAME,
	.id_table	= tc35815_pci_tbl,
	.probe		= tc35815_init_one,
	.remove		= tc35815_remove_one,
#ifdef CONFIG_PM
	.suspend	= tc35815_suspend,
	.resume		= tc35815_resume,
#endif
};

module_param_named(speed, options.speed, int, 0);
MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
module_param_named(duplex, options.duplex, int, 0);
MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
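/* Illustrative usage: to force 100 Mb/s full duplex at load time,
 *	modprobe tc35815 speed=100 duplex=2
 * Leaving both parameters at 0 keeps autonegotiation.
 */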

module_pci_driver(tc35815_pci_driver);
MODULE_DESCRIPTION("TOSHIBA TC35815 PCI 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");