   1/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
   2/*
   3	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
   4
   5	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
   6	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
   7	Copyright 2001 Manfred Spraul				    [natsemi.c]
   8	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
   9       	Written 1997-2001 by Donald Becker.			    [8139too.c]
  10	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
  11
  12	This software may be used and distributed according to the terms of
  13	the GNU General Public License (GPL), incorporated herein by reference.
  14	Drivers based on or derived from this code fall under the GPL and must
  15	retain the authorship, copyright and license notice.  This file is not
  16	a complete program and may only be used when the entire operating
  17	system is licensed under the GPL.
  18
  19	See the file COPYING in this distribution for more information.
  20
  21	Contributors:
  22
  23		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
  24		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
  25		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
  26
  27	TODO:
  28	* Test Tx checksumming thoroughly
  29
  30	Low priority TODO:
  31	* Complete reset on PciErr
  32	* Consider Rx interrupt mitigation using TimerIntr
  33	* Investigate using skb->priority with h/w VLAN priority
  34	* Investigate using High Priority Tx Queue with skb->priority
  35	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
  36	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
  37	* Implement Tx software interrupt mitigation via
  38	  Tx descriptor bit
  39	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
  40	  for this to be supported, one must(?) turn on packet padding.
  41	* Support external MII transceivers (patch available)
  42
  43	NOTES:
  44	* TX checksumming is considered experimental.  It is off by
   45	  default; use ethtool to turn it on.
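     	  (For example: "ethtool -K ethX tx on", where ethX stands for
     	  this interface's name.)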
  46
  47 */
  48
  49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  50
  51#define DRV_NAME		"8139cp"
  52#define DRV_VERSION		"1.3"
  53#define DRV_RELDATE		"Mar 22, 2004"
  54
  55
  56#include <linux/module.h>
  57#include <linux/moduleparam.h>
  58#include <linux/kernel.h>
  59#include <linux/compiler.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/init.h>
  63#include <linux/interrupt.h>
  64#include <linux/pci.h>
  65#include <linux/dma-mapping.h>
  66#include <linux/delay.h>
  67#include <linux/ethtool.h>
  68#include <linux/gfp.h>
  69#include <linux/mii.h>
  70#include <linux/if_vlan.h>
  71#include <linux/crc32.h>
  72#include <linux/in.h>
  73#include <linux/ip.h>
  74#include <linux/tcp.h>
  75#include <linux/udp.h>
  76#include <linux/cache.h>
  77#include <asm/io.h>
  78#include <asm/irq.h>
  79#include <asm/uaccess.h>
  80
  81/* These identify the driver base version and may not be removed. */
  82static char version[] =
  83DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
  84
  85MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
  86MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
  87MODULE_VERSION(DRV_VERSION);
  88MODULE_LICENSE("GPL");
  89
  90static int debug = -1;
  91module_param(debug, int, 0);
  92MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
  93
  94/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   95   The RTL chips use a 64-element hash table based on the Ethernet CRC.  */
  96static int multicast_filter_limit = 32;
  97module_param(multicast_filter_limit, int, 0);
  98MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
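     /* Example usage (illustrative values):
      *   modprobe 8139cp debug=16 multicast_filter_limit=64
      */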
  99
 100#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
 101				 NETIF_MSG_PROBE 	| \
 102				 NETIF_MSG_LINK)
 103#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
 104#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
 105#define CP_REGS_SIZE		(0xff + 1)
 106#define CP_REGS_VER		1		/* version 1 */
 107#define CP_RX_RING_SIZE		64
 108#define CP_TX_RING_SIZE		64
 109#define CP_RING_BYTES		\
 110		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
 111		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
 112		 CP_STATS_SIZE)
 113#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
 114#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
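     /* Number of free Tx descriptors; one slot is always left unused so that
      * tx_head == tx_tail unambiguously means "ring empty". */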
 115#define TX_BUFFS_AVAIL(CP)					\
 116	(((CP)->tx_tail <= (CP)->tx_head) ?			\
 117	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
 118	  (CP)->tx_tail - (CP)->tx_head - 1)
 119
 120#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
 121#define CP_INTERNAL_PHY		32
 122
 123/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
 124#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
 125#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
 126#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
 127#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
 128
 129/* Time in jiffies before concluding the transmitter is hung. */
 130#define TX_TIMEOUT		(6*HZ)
 131
 132/* hardware minimum and maximum for a single frame's data payload */
 133#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
 134#define CP_MAX_MTU		4096
 135
 136enum {
 137	/* NIC register offsets */
 138	MAC0		= 0x00,	/* Ethernet hardware address. */
 139	MAR0		= 0x08,	/* Multicast filter. */
 140	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
 141	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
 142	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
 143	Cmd		= 0x37, /* Command register */
 144	IntrMask	= 0x3C, /* Interrupt mask */
 145	IntrStatus	= 0x3E, /* Interrupt status */
 146	TxConfig	= 0x40, /* Tx configuration */
 147	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
 148	RxConfig	= 0x44, /* Rx configuration */
 149	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
 150	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
 151	Config1		= 0x52, /* Config1 */
 152	Config3		= 0x59, /* Config3 */
 153	Config4		= 0x5A, /* Config4 */
 154	MultiIntr	= 0x5C, /* Multiple interrupt select */
 155	BasicModeCtrl	= 0x62,	/* MII BMCR */
 156	BasicModeStatus	= 0x64, /* MII BMSR */
 157	NWayAdvert	= 0x66, /* MII ADVERTISE */
 158	NWayLPAR	= 0x68, /* MII LPA */
 159	NWayExpansion	= 0x6A, /* MII Expansion */
 160	Config5		= 0xD8,	/* Config5 */
 161	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
 162	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
 163	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
 164	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
 165	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
 166	TxThresh	= 0xEC, /* Early Tx threshold */
 167	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
 168	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
 169
 170	/* Tx and Rx status descriptors */
 171	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
 172	RingEnd		= (1 << 30), /* End of descriptor ring */
 173	FirstFrag	= (1 << 29), /* First segment of a packet */
 174	LastFrag	= (1 << 28), /* Final segment of a packet */
 175	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
 176	MSSShift	= 16,	     /* MSS value position */
 177	MSSMask		= 0xfff,     /* MSS value: 11 bits */
 178	TxError		= (1 << 23), /* Tx error summary */
 179	RxError		= (1 << 20), /* Rx error summary */
 180	IPCS		= (1 << 18), /* Calculate IP checksum */
 181	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
 182	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
 183	TxVlanTag	= (1 << 17), /* Add VLAN tag */
 184	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
 185	IPFail		= (1 << 15), /* IP checksum failed */
 186	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
 187	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
 188	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
 189	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
 190	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
 191	RxProtoTCP	= 1,
 192	RxProtoUDP	= 2,
 193	RxProtoIP	= 3,
 194	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
 195	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
 196	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
 197	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
 198	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
 199	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
 200	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
 201	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
 202	RxErrCRC	= (1 << 18), /* Rx CRC error */
 203	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
 204	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
 205	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
 206
 207	/* StatsAddr register */
 208	DumpStats	= (1 << 3),  /* Begin stats dump */
 209
 210	/* RxConfig register */
 211	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
 212	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
 213	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
 214	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
 215	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
 216	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
 217	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
 218	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
 219
 220	/* IntrMask / IntrStatus registers */
 221	PciErr		= (1 << 15), /* System error on the PCI bus */
 222	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
 223	LenChg		= (1 << 13), /* Cable length change */
 224	SWInt		= (1 << 8),  /* Software-requested interrupt */
 225	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
 226	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
 227	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
 228	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
 229	TxErr		= (1 << 3),  /* Tx error */
 230	TxOK		= (1 << 2),  /* Tx packet sent */
 231	RxErr		= (1 << 1),  /* Rx error */
 232	RxOK		= (1 << 0),  /* Rx packet received */
 233	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
 234					but hardware likes to raise it */
 235
 236	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
 237			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
 238			  RxErr | RxOK | IntrResvd,
 239
 240	/* C mode command register */
 241	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
 242	RxOn		= (1 << 3),  /* Rx mode enable */
 243	TxOn		= (1 << 2),  /* Tx mode enable */
 244
 245	/* C+ mode command register */
 246	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
 247	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
 248	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
 249	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
 250	CpRxOn		= (1 << 1),  /* Rx mode enable */
 251	CpTxOn		= (1 << 0),  /* Tx mode enable */
 252
  253	/* Cfg9346 EEPROM control register */
 254	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
 255	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
 256
 257	/* TxConfig register */
 258	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
 259	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
 260
 261	/* Early Tx Threshold register */
 262	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
 263	TxThreshMax	= 2048,	     /* Max early Tx threshold */
 264
 265	/* Config1 register */
 266	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
 267	LWACT           = (1 << 4),  /* LWAKE active mode */
 268	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
 269
 270	/* Config3 register */
 271	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
 272	MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
 273	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
 274
 275	/* Config4 register */
 276	LWPTN           = (1 << 1),  /* LWAKE Pattern */
 277	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
 278
 279	/* Config5 register */
 280	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
 281	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
 282	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
 283	LANWake         = (1 << 1),  /* Enable LANWake signal */
 284	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
 285
 286	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
 287	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
 288	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
 289};
 290
 291static const unsigned int cp_rx_config =
 292	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
 293	  (RX_DMA_BURST << RxCfgDMAShift);
 294
 295struct cp_desc {
 296	__le32		opts1;
 297	__le32		opts2;
 298	__le64		addr;
 299};
 300
 301struct cp_dma_stats {
 302	__le64			tx_ok;
 303	__le64			rx_ok;
 304	__le64			tx_err;
 305	__le32			rx_err;
 306	__le16			rx_fifo;
 307	__le16			frame_align;
 308	__le32			tx_ok_1col;
 309	__le32			tx_ok_mcol;
 310	__le64			rx_ok_phys;
 311	__le64			rx_ok_bcast;
 312	__le32			rx_ok_mcast;
 313	__le16			tx_abort;
 314	__le16			tx_underrun;
 315} __packed;
 316
 317struct cp_extra_stats {
 318	unsigned long		rx_frags;
 319};
 320
 321struct cp_private {
 322	void			__iomem *regs;
 323	struct net_device	*dev;
 324	spinlock_t		lock;
 325	u32			msg_enable;
 326
 327	struct napi_struct	napi;
 328
 329	struct pci_dev		*pdev;
 330	u32			rx_config;
 331	u16			cpcmd;
 332
 333	struct cp_extra_stats	cp_stats;
 334
 335	unsigned		rx_head		____cacheline_aligned;
 336	unsigned		rx_tail;
 337	struct cp_desc		*rx_ring;
 338	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
 339
 340	unsigned		tx_head		____cacheline_aligned;
 341	unsigned		tx_tail;
 342	struct cp_desc		*tx_ring;
 343	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
 344
 345	unsigned		rx_buf_sz;
 346	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 347
 348	dma_addr_t		ring_dma;
 349
 350	struct mii_if_info	mii_if;
 351};
 352
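     /* MMIO accessors; the *_f variants read the register back to flush
      * posted PCI writes before returning. */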
 353#define cpr8(reg)	readb(cp->regs + (reg))
 354#define cpr16(reg)	readw(cp->regs + (reg))
 355#define cpr32(reg)	readl(cp->regs + (reg))
 356#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
 357#define cpw16(reg,val)	writew((val), cp->regs + (reg))
 358#define cpw32(reg,val)	writel((val), cp->regs + (reg))
 359#define cpw8_f(reg,val) do {			\
 360	writeb((val), cp->regs + (reg));	\
 361	readb(cp->regs + (reg));		\
 362	} while (0)
 363#define cpw16_f(reg,val) do {			\
 364	writew((val), cp->regs + (reg));	\
 365	readw(cp->regs + (reg));		\
 366	} while (0)
 367#define cpw32_f(reg,val) do {			\
 368	writel((val), cp->regs + (reg));	\
 369	readl(cp->regs + (reg));		\
 370	} while (0)
 371
 372
 373static void __cp_set_rx_mode (struct net_device *dev);
 374static void cp_tx (struct cp_private *cp);
 375static void cp_clean_rings (struct cp_private *cp);
 376#ifdef CONFIG_NET_POLL_CONTROLLER
 377static void cp_poll_controller(struct net_device *dev);
 378#endif
 379static int cp_get_eeprom_len(struct net_device *dev);
 380static int cp_get_eeprom(struct net_device *dev,
 381			 struct ethtool_eeprom *eeprom, u8 *data);
 382static int cp_set_eeprom(struct net_device *dev,
 383			 struct ethtool_eeprom *eeprom, u8 *data);
 384
 385static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
 386	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
 387	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
 388	{ },
 389};
 390MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
 391
 392static struct {
 393	const char str[ETH_GSTRING_LEN];
 394} ethtool_stats_keys[] = {
 395	{ "tx_ok" },
 396	{ "rx_ok" },
 397	{ "tx_err" },
 398	{ "rx_err" },
 399	{ "rx_fifo" },
 400	{ "frame_align" },
 401	{ "tx_ok_1col" },
 402	{ "tx_ok_mcol" },
 403	{ "rx_ok_phys" },
 404	{ "rx_ok_bcast" },
 405	{ "rx_ok_mcast" },
 406	{ "tx_abort" },
 407	{ "tx_underrun" },
 408	{ "rx_frags" },
 409};
 410
 411
 412static inline void cp_set_rxbufsize (struct cp_private *cp)
 413{
 414	unsigned int mtu = cp->dev->mtu;
 415
 416	if (mtu > ETH_DATA_LEN)
 417		/* MTU + ethernet header + FCS + optional VLAN tag */
 418		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
 419	else
 420		cp->rx_buf_sz = PKT_BUF_SZ;
 421}
 422
 423static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
 424			      struct cp_desc *desc)
 425{
 426	u32 opts2 = le32_to_cpu(desc->opts2);
 427
 428	skb->protocol = eth_type_trans (skb, cp->dev);
 429
 430	cp->dev->stats.rx_packets++;
 431	cp->dev->stats.rx_bytes += skb->len;
 432
 433	if (opts2 & RxVlanTagged)
 434		__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
 435
 436	napi_gro_receive(&cp->napi, skb);
 437}
 438
 439static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
 440			    u32 status, u32 len)
 441{
 442	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
 443		  rx_tail, status, len);
 444	cp->dev->stats.rx_errors++;
 445	if (status & RxErrFrame)
 446		cp->dev->stats.rx_frame_errors++;
 447	if (status & RxErrCRC)
 448		cp->dev->stats.rx_crc_errors++;
 449	if ((status & RxErrRunt) || (status & RxErrLong))
 450		cp->dev->stats.rx_length_errors++;
 451	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
 452		cp->dev->stats.rx_length_errors++;
 453	if (status & RxErrFIFO)
 454		cp->dev->stats.rx_fifo_errors++;
 455}
 456
 457static inline unsigned int cp_rx_csum_ok (u32 status)
 458{
 459	unsigned int protocol = (status >> 16) & 0x3;
 460
 461	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
 462	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
 463		return 1;
 464	else
 465		return 0;
 466}
 467
 468static int cp_rx_poll(struct napi_struct *napi, int budget)
 469{
 470	struct cp_private *cp = container_of(napi, struct cp_private, napi);
 471	struct net_device *dev = cp->dev;
 472	unsigned int rx_tail = cp->rx_tail;
 473	int rx;
 474
 475rx_status_loop:
 476	rx = 0;
 477	cpw16(IntrStatus, cp_rx_intr_mask);
 478
 479	while (1) {
 480		u32 status, len;
 481		dma_addr_t mapping;
 482		struct sk_buff *skb, *new_skb;
 483		struct cp_desc *desc;
 484		const unsigned buflen = cp->rx_buf_sz;
 485
 486		skb = cp->rx_skb[rx_tail];
 487		BUG_ON(!skb);
 488
 489		desc = &cp->rx_ring[rx_tail];
 490		status = le32_to_cpu(desc->opts1);
 491		if (status & DescOwn)
 492			break;
 493
 494		len = (status & 0x1fff) - 4;
 495		mapping = le64_to_cpu(desc->addr);
 496
 497		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 498			/* we don't support incoming fragmented frames.
 499			 * instead, we attempt to ensure that the
 500			 * pre-allocated RX skbs are properly sized such
 501			 * that RX fragments are never encountered
 502			 */
 503			cp_rx_err_acct(cp, rx_tail, status, len);
 504			dev->stats.rx_dropped++;
 505			cp->cp_stats.rx_frags++;
 506			goto rx_next;
 507		}
 508
 509		if (status & (RxError | RxErrFIFO)) {
 510			cp_rx_err_acct(cp, rx_tail, status, len);
 511			goto rx_next;
 512		}
 513
 514		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
 515			  rx_tail, status, len);
 516
 517		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
 518		if (!new_skb) {
 519			dev->stats.rx_dropped++;
 520			goto rx_next;
 521		}
 522
 523		dma_unmap_single(&cp->pdev->dev, mapping,
 524				 buflen, PCI_DMA_FROMDEVICE);
 525
 526		/* Handle checksum offloading for incoming packets. */
 527		if (cp_rx_csum_ok(status))
 528			skb->ip_summed = CHECKSUM_UNNECESSARY;
 529		else
 530			skb_checksum_none_assert(skb);
 531
 532		skb_put(skb, len);
 533
 534		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
 535					 PCI_DMA_FROMDEVICE);
 536		cp->rx_skb[rx_tail] = new_skb;
 537
 538		cp_rx_skb(cp, skb, desc);
 539		rx++;
 540
 541rx_next:
 542		cp->rx_ring[rx_tail].opts2 = 0;
 543		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
 544		if (rx_tail == (CP_RX_RING_SIZE - 1))
 545			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
 546						  cp->rx_buf_sz);
 547		else
 548			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
 549		rx_tail = NEXT_RX(rx_tail);
 550
 551		if (rx >= budget)
 552			break;
 553	}
 554
 555	cp->rx_tail = rx_tail;
 556
 557	/* if we did not reach work limit, then we're done with
 558	 * this round of polling
 559	 */
 560	if (rx < budget) {
 561		unsigned long flags;
 562
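     		/* Re-check the hardware status before completing NAPI so that
     		 * packets arriving after the loop exited are not stranded
     		 * until the next interrupt. */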
 563		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 564			goto rx_status_loop;
 565
 566		spin_lock_irqsave(&cp->lock, flags);
 567		__napi_complete(napi);
 568		cpw16_f(IntrMask, cp_intr_mask);
 569		spin_unlock_irqrestore(&cp->lock, flags);
 570	}
 571
 572	return rx;
 573}
 574
 575static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 576{
 577	struct net_device *dev = dev_instance;
 578	struct cp_private *cp;
 579	u16 status;
 580
 581	if (unlikely(dev == NULL))
 582		return IRQ_NONE;
 583	cp = netdev_priv(dev);
 584
 585	status = cpr16(IntrStatus);
 586	if (!status || (status == 0xFFFF))
 587		return IRQ_NONE;
 588
 589	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
 590		  status, cpr8(Cmd), cpr16(CpCmd));
 591
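     	/* Ack everything except the Rx bits; those are acked by the
     	 * NAPI poll routine. */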
 592	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
 593
 594	spin_lock(&cp->lock);
 595
  596	/* close possible races with dev_close */
 597	if (unlikely(!netif_running(dev))) {
 598		cpw16(IntrMask, 0);
 599		spin_unlock(&cp->lock);
 600		return IRQ_HANDLED;
 601	}
 602
 603	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
 604		if (napi_schedule_prep(&cp->napi)) {
 605			cpw16_f(IntrMask, cp_norx_intr_mask);
 606			__napi_schedule(&cp->napi);
 607		}
 608
 609	if (status & (TxOK | TxErr | TxEmpty | SWInt))
 610		cp_tx(cp);
 611	if (status & LinkChg)
 612		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
 613
 614	spin_unlock(&cp->lock);
 615
 616	if (status & PciErr) {
 617		u16 pci_status;
 618
 619		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
 620		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
 621		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
 622			   status, pci_status);
 623
 624		/* TODO: reset hardware */
 625	}
 626
 627	return IRQ_HANDLED;
 628}
 629
 630#ifdef CONFIG_NET_POLL_CONTROLLER
 631/*
 632 * Polling receive - used by netconsole and other diagnostic tools
 633 * to allow network i/o with interrupts disabled.
 634 */
 635static void cp_poll_controller(struct net_device *dev)
 636{
 637	disable_irq(dev->irq);
 638	cp_interrupt(dev->irq, dev);
 639	enable_irq(dev->irq);
 640}
 641#endif
 642
 643static void cp_tx (struct cp_private *cp)
 644{
 645	unsigned tx_head = cp->tx_head;
 646	unsigned tx_tail = cp->tx_tail;
 647
 648	while (tx_tail != tx_head) {
 649		struct cp_desc *txd = cp->tx_ring + tx_tail;
 650		struct sk_buff *skb;
 651		u32 status;
 652
 653		rmb();
 654		status = le32_to_cpu(txd->opts1);
 655		if (status & DescOwn)
 656			break;
 657
 658		skb = cp->tx_skb[tx_tail];
 659		BUG_ON(!skb);
 660
 661		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
 662				 le32_to_cpu(txd->opts1) & 0xffff,
 663				 PCI_DMA_TODEVICE);
 664
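     		/* Every descriptor of a multi-fragment frame records the same
     		 * skb; it is freed only when the LastFrag descriptor completes. */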
 665		if (status & LastFrag) {
 666			if (status & (TxError | TxFIFOUnder)) {
 667				netif_dbg(cp, tx_err, cp->dev,
 668					  "tx err, status 0x%x\n", status);
 669				cp->dev->stats.tx_errors++;
 670				if (status & TxOWC)
 671					cp->dev->stats.tx_window_errors++;
 672				if (status & TxMaxCol)
 673					cp->dev->stats.tx_aborted_errors++;
 674				if (status & TxLinkFail)
 675					cp->dev->stats.tx_carrier_errors++;
 676				if (status & TxFIFOUnder)
 677					cp->dev->stats.tx_fifo_errors++;
 678			} else {
 679				cp->dev->stats.collisions +=
 680					((status >> TxColCntShift) & TxColCntMask);
 681				cp->dev->stats.tx_packets++;
 682				cp->dev->stats.tx_bytes += skb->len;
 683				netif_dbg(cp, tx_done, cp->dev,
 684					  "tx done, slot %d\n", tx_tail);
 685			}
 686			dev_kfree_skb_irq(skb);
 687		}
 688
 689		cp->tx_skb[tx_tail] = NULL;
 690
 691		tx_tail = NEXT_TX(tx_tail);
 692	}
 693
 694	cp->tx_tail = tx_tail;
 695
 696	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 697		netif_wake_queue(cp->dev);
 698}
 699
 700static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 701{
 702	return vlan_tx_tag_present(skb) ?
 703		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 704}
 705
 706static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 707					struct net_device *dev)
 708{
 709	struct cp_private *cp = netdev_priv(dev);
 710	unsigned entry;
 711	u32 eor, flags;
 712	unsigned long intr_flags;
 713	__le32 opts2;
 714	int mss = 0;
 715
 716	spin_lock_irqsave(&cp->lock, intr_flags);
 717
 718	/* This is a hard error, log it. */
 719	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 720		netif_stop_queue(dev);
 721		spin_unlock_irqrestore(&cp->lock, intr_flags);
 722		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 723		return NETDEV_TX_BUSY;
 724	}
 725
 726	entry = cp->tx_head;
 727	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 728	mss = skb_shinfo(skb)->gso_size;
 729
 730	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
 731
 732	if (skb_shinfo(skb)->nr_frags == 0) {
 733		struct cp_desc *txd = &cp->tx_ring[entry];
 734		u32 len;
 735		dma_addr_t mapping;
 736
 737		len = skb->len;
 738		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
 739		txd->opts2 = opts2;
 740		txd->addr = cpu_to_le64(mapping);
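     		/* Order the address/opts2 writes before DescOwn is set in
     		 * opts1 below, so the NIC never sees a half-built descriptor. */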
 741		wmb();
 742
 743		flags = eor | len | DescOwn | FirstFrag | LastFrag;
 744
 745		if (mss)
 746			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
 747		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 748			const struct iphdr *ip = ip_hdr(skb);
 749			if (ip->protocol == IPPROTO_TCP)
 750				flags |= IPCS | TCPCS;
 751			else if (ip->protocol == IPPROTO_UDP)
 752				flags |= IPCS | UDPCS;
 753			else
 754				WARN_ON(1);	/* we need a WARN() */
 755		}
 756
 757		txd->opts1 = cpu_to_le32(flags);
 758		wmb();
 759
 760		cp->tx_skb[entry] = skb;
 761		entry = NEXT_TX(entry);
 762	} else {
 763		struct cp_desc *txd;
 764		u32 first_len, first_eor;
 765		dma_addr_t first_mapping;
 766		int frag, first_entry = entry;
 767		const struct iphdr *ip = ip_hdr(skb);
 768
 769		/* We must give this initial chunk to the device last.
 770		 * Otherwise we could race with the device.
 771		 */
 772		first_eor = eor;
 773		first_len = skb_headlen(skb);
 774		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 775					       first_len, PCI_DMA_TODEVICE);
 776		cp->tx_skb[entry] = skb;
 777		entry = NEXT_TX(entry);
 778
 779		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 780			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 781			u32 len;
 782			u32 ctrl;
 783			dma_addr_t mapping;
 784
 785			len = this_frag->size;
 786			mapping = dma_map_single(&cp->pdev->dev,
 787						 ((void *) page_address(this_frag->page) +
 788						  this_frag->page_offset),
 789						 len, PCI_DMA_TODEVICE);
 790			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 791
 792			ctrl = eor | len | DescOwn;
 793
 794			if (mss)
 795				ctrl |= LargeSend |
 796					((mss & MSSMask) << MSSShift);
 797			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 798				if (ip->protocol == IPPROTO_TCP)
 799					ctrl |= IPCS | TCPCS;
 800				else if (ip->protocol == IPPROTO_UDP)
 801					ctrl |= IPCS | UDPCS;
 802				else
 803					BUG();
 804			}
 805
 806			if (frag == skb_shinfo(skb)->nr_frags - 1)
 807				ctrl |= LastFrag;
 808
 809			txd = &cp->tx_ring[entry];
 810			txd->opts2 = opts2;
 811			txd->addr = cpu_to_le64(mapping);
 812			wmb();
 813
 814			txd->opts1 = cpu_to_le32(ctrl);
 815			wmb();
 816
 817			cp->tx_skb[entry] = skb;
 818			entry = NEXT_TX(entry);
 819		}
 820
 821		txd = &cp->tx_ring[first_entry];
 822		txd->opts2 = opts2;
 823		txd->addr = cpu_to_le64(first_mapping);
 824		wmb();
 825
 826		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 827			if (ip->protocol == IPPROTO_TCP)
 828				txd->opts1 = cpu_to_le32(first_eor | first_len |
 829							 FirstFrag | DescOwn |
 830							 IPCS | TCPCS);
 831			else if (ip->protocol == IPPROTO_UDP)
 832				txd->opts1 = cpu_to_le32(first_eor | first_len |
 833							 FirstFrag | DescOwn |
 834							 IPCS | UDPCS);
 835			else
 836				BUG();
 837		} else
 838			txd->opts1 = cpu_to_le32(first_eor | first_len |
 839						 FirstFrag | DescOwn);
 840		wmb();
 841	}
 842	cp->tx_head = entry;
 843	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 844		  entry, skb->len);
 845	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 846		netif_stop_queue(dev);
 847
 848	spin_unlock_irqrestore(&cp->lock, intr_flags);
 849
 850	cpw8(TxPoll, NormalTxPoll);
 851
 852	return NETDEV_TX_OK;
 853}
 854
 855/* Set or clear the multicast filter for this adaptor.
 856   This routine is not state sensitive and need not be SMP locked. */
 857
 858static void __cp_set_rx_mode (struct net_device *dev)
 859{
 860	struct cp_private *cp = netdev_priv(dev);
 861	u32 mc_filter[2];	/* Multicast hash filter */
 862	int rx_mode;
 863	u32 tmp;
 864
 865	/* Note: do not reorder, GCC is clever about common statements. */
 866	if (dev->flags & IFF_PROMISC) {
 867		/* Unconditionally log net taps. */
 868		rx_mode =
 869		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 870		    AcceptAllPhys;
 871		mc_filter[1] = mc_filter[0] = 0xffffffff;
 872	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 873		   (dev->flags & IFF_ALLMULTI)) {
 874		/* Too many to filter perfectly -- accept all multicasts. */
 875		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 876		mc_filter[1] = mc_filter[0] = 0xffffffff;
 877	} else {
 878		struct netdev_hw_addr *ha;
 879		rx_mode = AcceptBroadcast | AcceptMyPhys;
 880		mc_filter[1] = mc_filter[0] = 0;
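     		/* The top six bits of the Ethernet CRC pick one of the 64
     		 * bits in the MAR0/MAR1 hash filter. */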
 881		netdev_for_each_mc_addr(ha, dev) {
 882			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 883
 884			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 885			rx_mode |= AcceptMulticast;
 886		}
 887	}
 888
 889	/* We can safely update without stopping the chip. */
 890	tmp = cp_rx_config | rx_mode;
 891	if (cp->rx_config != tmp) {
 892		cpw32_f (RxConfig, tmp);
 893		cp->rx_config = tmp;
 894	}
 895	cpw32_f (MAR0 + 0, mc_filter[0]);
 896	cpw32_f (MAR0 + 4, mc_filter[1]);
 897}
 898
 899static void cp_set_rx_mode (struct net_device *dev)
 900{
 901	unsigned long flags;
 902	struct cp_private *cp = netdev_priv(dev);
 903
 904	spin_lock_irqsave (&cp->lock, flags);
 905	__cp_set_rx_mode(dev);
 906	spin_unlock_irqrestore (&cp->lock, flags);
 907}
 908
 909static void __cp_get_stats(struct cp_private *cp)
 910{
 911	/* only lower 24 bits valid; write any value to clear */
 912	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
 913	cpw32 (RxMissed, 0);
 914}
 915
 916static struct net_device_stats *cp_get_stats(struct net_device *dev)
 917{
 918	struct cp_private *cp = netdev_priv(dev);
 919	unsigned long flags;
 920
  921	/* The chip only needs to report frames it silently dropped. */
 922	spin_lock_irqsave(&cp->lock, flags);
  923	if (netif_running(dev) && netif_device_present(dev))
  924		__cp_get_stats(cp);
 925	spin_unlock_irqrestore(&cp->lock, flags);
 926
 927	return &dev->stats;
 928}
 929
 930static void cp_stop_hw (struct cp_private *cp)
 931{
 932	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
 933	cpw16_f(IntrMask, 0);
 934	cpw8(Cmd, 0);
 935	cpw16_f(CpCmd, 0);
 936	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
 937
 938	cp->rx_tail = 0;
 939	cp->tx_head = cp->tx_tail = 0;
 940}
 941
 942static void cp_reset_hw (struct cp_private *cp)
 943{
 944	unsigned work = 1000;
 945
 946	cpw8(Cmd, CmdReset);
 947
 948	while (work--) {
 949		if (!(cpr8(Cmd) & CmdReset))
 950			return;
 951
 952		schedule_timeout_uninterruptible(10);
 953	}
 954
 955	netdev_err(cp->dev, "hardware reset timeout\n");
 956}
 957
 958static inline void cp_start_hw (struct cp_private *cp)
 959{
 960	cpw16(CpCmd, cp->cpcmd);
 961	cpw8(Cmd, RxOn | TxOn);
 962}
 963
 964static void cp_init_hw (struct cp_private *cp)
 965{
 966	struct net_device *dev = cp->dev;
 967	dma_addr_t ring_dma;
 968
 969	cp_reset_hw(cp);
 970
 971	cpw8_f (Cfg9346, Cfg9346_Unlock);
 972
 973	/* Restore our idea of the MAC address. */
 974	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
 975	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 976
 977	cp_start_hw(cp);
 978	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 979
 980	__cp_set_rx_mode(dev);
 981	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
 982
 983	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
 984	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
 985	cpw8(Config3, PARMEnable);
 986	cp->wol_enabled = 0;
 987
 988	cpw8(Config5, cpr8(Config5) & PMEStatus);
 989
 990	cpw32_f(HiTxRingAddr, 0);
 991	cpw32_f(HiTxRingAddr + 4, 0);
 992
 993	ring_dma = cp->ring_dma;
 994	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
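     	/* Write the high 32 bits; the split shift avoids an undefined
     	 * 32-bit shift when dma_addr_t is only 32 bits wide. */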
 995	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
 996
 997	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
 998	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
 999	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1000
1001	cpw16(MultiIntr, 0);
1002
1003	cpw16_f(IntrMask, cp_intr_mask);
1004
1005	cpw8_f(Cfg9346, Cfg9346_Lock);
1006}
1007
1008static int cp_refill_rx(struct cp_private *cp)
1009{
1010	struct net_device *dev = cp->dev;
1011	unsigned i;
1012
1013	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1014		struct sk_buff *skb;
1015		dma_addr_t mapping;
1016
1017		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1018		if (!skb)
1019			goto err_out;
1020
1021		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1022					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1023		cp->rx_skb[i] = skb;
1024
1025		cp->rx_ring[i].opts2 = 0;
1026		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1027		if (i == (CP_RX_RING_SIZE - 1))
1028			cp->rx_ring[i].opts1 =
1029				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1030		else
1031			cp->rx_ring[i].opts1 =
1032				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1033	}
1034
1035	return 0;
1036
1037err_out:
1038	cp_clean_rings(cp);
1039	return -ENOMEM;
1040}
1041
1042static void cp_init_rings_index (struct cp_private *cp)
1043{
1044	cp->rx_tail = 0;
1045	cp->tx_head = cp->tx_tail = 0;
1046}
1047
1048static int cp_init_rings (struct cp_private *cp)
1049{
1050	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1051	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1052
1053	cp_init_rings_index(cp);
1054
1055	return cp_refill_rx (cp);
1056}
1057
1058static int cp_alloc_rings (struct cp_private *cp)
1059{
1060	void *mem;
1061
1062	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1063				 &cp->ring_dma, GFP_KERNEL);
1064	if (!mem)
1065		return -ENOMEM;
1066
1067	cp->rx_ring = mem;
1068	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1069
1070	return cp_init_rings(cp);
1071}
1072
1073static void cp_clean_rings (struct cp_private *cp)
1074{
1075	struct cp_desc *desc;
1076	unsigned i;
1077
1078	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1079		if (cp->rx_skb[i]) {
1080			desc = cp->rx_ring + i;
1081			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1082					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1083			dev_kfree_skb(cp->rx_skb[i]);
1084		}
1085	}
1086
1087	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1088		if (cp->tx_skb[i]) {
1089			struct sk_buff *skb = cp->tx_skb[i];
1090
1091			desc = cp->tx_ring + i;
1092			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1093					 le32_to_cpu(desc->opts1) & 0xffff,
1094					 PCI_DMA_TODEVICE);
1095			if (le32_to_cpu(desc->opts1) & LastFrag)
1096				dev_kfree_skb(skb);
1097			cp->dev->stats.tx_dropped++;
1098		}
1099	}
1100
1101	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1102	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1103
1104	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1105	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1106}
1107
1108static void cp_free_rings (struct cp_private *cp)
1109{
1110	cp_clean_rings(cp);
1111	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1112			  cp->ring_dma);
1113	cp->rx_ring = NULL;
1114	cp->tx_ring = NULL;
1115}
1116
1117static int cp_open (struct net_device *dev)
1118{
1119	struct cp_private *cp = netdev_priv(dev);
1120	int rc;
1121
1122	netif_dbg(cp, ifup, dev, "enabling interface\n");
1123
1124	rc = cp_alloc_rings(cp);
1125	if (rc)
1126		return rc;
1127
1128	napi_enable(&cp->napi);
1129
1130	cp_init_hw(cp);
1131
1132	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1133	if (rc)
1134		goto err_out_hw;
1135
1136	netif_carrier_off(dev);
1137	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1138	netif_start_queue(dev);
1139
1140	return 0;
1141
1142err_out_hw:
1143	napi_disable(&cp->napi);
1144	cp_stop_hw(cp);
1145	cp_free_rings(cp);
1146	return rc;
1147}
1148
1149static int cp_close (struct net_device *dev)
1150{
1151	struct cp_private *cp = netdev_priv(dev);
1152	unsigned long flags;
1153
1154	napi_disable(&cp->napi);
1155
1156	netif_dbg(cp, ifdown, dev, "disabling interface\n");
1157
1158	spin_lock_irqsave(&cp->lock, flags);
1159
1160	netif_stop_queue(dev);
1161	netif_carrier_off(dev);
1162
1163	cp_stop_hw(cp);
1164
1165	spin_unlock_irqrestore(&cp->lock, flags);
1166
1167	free_irq(dev->irq, dev);
1168
1169	cp_free_rings(cp);
1170	return 0;
1171}
1172
1173static void cp_tx_timeout(struct net_device *dev)
1174{
1175	struct cp_private *cp = netdev_priv(dev);
1176	unsigned long flags;
1177	int rc;
1178
1179	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1180		    cpr8(Cmd), cpr16(CpCmd),
1181		    cpr16(IntrStatus), cpr16(IntrMask));
1182
1183	spin_lock_irqsave(&cp->lock, flags);
1184
1185	cp_stop_hw(cp);
1186	cp_clean_rings(cp);
1187	rc = cp_init_rings(cp);
1188	cp_start_hw(cp);
1189
1190	netif_wake_queue(dev);
1191
1192	spin_unlock_irqrestore(&cp->lock, flags);
1193}
1194
1195#ifdef BROKEN
1196static int cp_change_mtu(struct net_device *dev, int new_mtu)
1197{
1198	struct cp_private *cp = netdev_priv(dev);
1199	int rc;
1200	unsigned long flags;
1201
1202	/* check for invalid MTU, according to hardware limits */
1203	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1204		return -EINVAL;
1205
1206	/* if network interface not up, no need for complexity */
1207	if (!netif_running(dev)) {
1208		dev->mtu = new_mtu;
1209		cp_set_rxbufsize(cp);	/* set new rx buf size */
1210		return 0;
1211	}
1212
1213	spin_lock_irqsave(&cp->lock, flags);
1214
1215	cp_stop_hw(cp);			/* stop h/w and free rings */
1216	cp_clean_rings(cp);
1217
1218	dev->mtu = new_mtu;
1219	cp_set_rxbufsize(cp);		/* set new rx buf size */
1220
1221	rc = cp_init_rings(cp);		/* realloc and restart h/w */
1222	cp_start_hw(cp);
1223
1224	spin_unlock_irqrestore(&cp->lock, flags);
1225
1226	return rc;
1227}
1228#endif /* BROKEN */
1229
1230static const char mii_2_8139_map[8] = {
1231	BasicModeCtrl,
1232	BasicModeStatus,
1233	0,
1234	0,
1235	NWayAdvert,
1236	NWayLPAR,
1237	NWayExpansion,
1238	0
1239};
1240
1241static int mdio_read(struct net_device *dev, int phy_id, int location)
1242{
1243	struct cp_private *cp = netdev_priv(dev);
1244
1245	return location < 8 && mii_2_8139_map[location] ?
1246	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1247}
1248
1249
1250static void mdio_write(struct net_device *dev, int phy_id, int location,
1251		       int value)
1252{
1253	struct cp_private *cp = netdev_priv(dev);
1254
1255	if (location == 0) {
1256		cpw8(Cfg9346, Cfg9346_Unlock);
1257		cpw16(BasicModeCtrl, value);
1258		cpw8(Cfg9346, Cfg9346_Lock);
1259	} else if (location < 8 && mii_2_8139_map[location])
1260		cpw16(mii_2_8139_map[location], value);
1261}
1262
1263/* Set the ethtool Wake-on-LAN settings */
1264static int netdev_set_wol (struct cp_private *cp,
1265			   const struct ethtool_wolinfo *wol)
1266{
1267	u8 options;
1268
1269	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1270	/* If WOL is being disabled, no need for complexity */
1271	if (wol->wolopts) {
1272		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1273		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1274	}
1275
1276	cpw8 (Cfg9346, Cfg9346_Unlock);
1277	cpw8 (Config3, options);
1278	cpw8 (Cfg9346, Cfg9346_Lock);
1279
1280	options = 0; /* Paranoia setting */
1281	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1282	/* If WOL is being disabled, no need for complexity */
1283	if (wol->wolopts) {
1284		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1285		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1286		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1287	}
1288
1289	cpw8 (Config5, options);
1290
1291	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1292
1293	return 0;
1294}
1295
1296/* Get the ethtool Wake-on-LAN settings */
1297static void netdev_get_wol (struct cp_private *cp,
1298	             struct ethtool_wolinfo *wol)
1299{
1300	u8 options;
1301
1302	wol->wolopts   = 0; /* Start from scratch */
1303	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1304		         WAKE_MCAST | WAKE_UCAST;
1305	/* We don't need to go on if WOL is disabled */
1306	if (!cp->wol_enabled) return;
1307
1308	options        = cpr8 (Config3);
1309	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1310	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1311
1312	options        = 0; /* Paranoia setting */
1313	options        = cpr8 (Config5);
1314	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1315	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1316	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1317}
1318
1319static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1320{
1321	struct cp_private *cp = netdev_priv(dev);
1322
1323	strcpy (info->driver, DRV_NAME);
1324	strcpy (info->version, DRV_VERSION);
1325	strcpy (info->bus_info, pci_name(cp->pdev));
1326}
1327
1328static int cp_get_regs_len(struct net_device *dev)
1329{
1330	return CP_REGS_SIZE;
1331}
1332
1333static int cp_get_sset_count (struct net_device *dev, int sset)
1334{
1335	switch (sset) {
1336	case ETH_SS_STATS:
1337		return CP_NUM_STATS;
1338	default:
1339		return -EOPNOTSUPP;
1340	}
1341}
1342
1343static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1344{
1345	struct cp_private *cp = netdev_priv(dev);
1346	int rc;
1347	unsigned long flags;
1348
1349	spin_lock_irqsave(&cp->lock, flags);
1350	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1351	spin_unlock_irqrestore(&cp->lock, flags);
1352
1353	return rc;
1354}
1355
1356static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1357{
1358	struct cp_private *cp = netdev_priv(dev);
1359	int rc;
1360	unsigned long flags;
1361
1362	spin_lock_irqsave(&cp->lock, flags);
1363	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1364	spin_unlock_irqrestore(&cp->lock, flags);
1365
1366	return rc;
1367}
1368
1369static int cp_nway_reset(struct net_device *dev)
1370{
1371	struct cp_private *cp = netdev_priv(dev);
1372	return mii_nway_restart(&cp->mii_if);
1373}
1374
1375static u32 cp_get_msglevel(struct net_device *dev)
1376{
1377	struct cp_private *cp = netdev_priv(dev);
1378	return cp->msg_enable;
1379}
1380
1381static void cp_set_msglevel(struct net_device *dev, u32 value)
1382{
1383	struct cp_private *cp = netdev_priv(dev);
1384	cp->msg_enable = value;
1385}
1386
1387static int cp_set_features(struct net_device *dev, u32 features)
1388{
1389	struct cp_private *cp = netdev_priv(dev);
1390	unsigned long flags;
1391
1392	if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1393		return 0;
1394
1395	spin_lock_irqsave(&cp->lock, flags);
1396
1397	if (features & NETIF_F_RXCSUM)
1398		cp->cpcmd |= RxChkSum;
1399	else
1400		cp->cpcmd &= ~RxChkSum;
1401
1402	if (features & NETIF_F_HW_VLAN_RX)
1403		cp->cpcmd |= RxVlanOn;
1404	else
1405		cp->cpcmd &= ~RxVlanOn;
1406
1407	cpw16_f(CpCmd, cp->cpcmd);
1408	spin_unlock_irqrestore(&cp->lock, flags);
1409
1410	return 0;
1411}
1412
1413static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1414		        void *p)
1415{
1416	struct cp_private *cp = netdev_priv(dev);
1417	unsigned long flags;
1418
1419	if (regs->len < CP_REGS_SIZE)
1420		return /* -EINVAL */;
1421
1422	regs->version = CP_REGS_VER;
1423
1424	spin_lock_irqsave(&cp->lock, flags);
1425	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1426	spin_unlock_irqrestore(&cp->lock, flags);
1427}
1428
1429static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1430{
1431	struct cp_private *cp = netdev_priv(dev);
1432	unsigned long flags;
1433
1434	spin_lock_irqsave (&cp->lock, flags);
1435	netdev_get_wol (cp, wol);
1436	spin_unlock_irqrestore (&cp->lock, flags);
1437}
1438
1439static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1440{
1441	struct cp_private *cp = netdev_priv(dev);
1442	unsigned long flags;
1443	int rc;
1444
1445	spin_lock_irqsave (&cp->lock, flags);
1446	rc = netdev_set_wol (cp, wol);
1447	spin_unlock_irqrestore (&cp->lock, flags);
1448
1449	return rc;
1450}
1451
1452static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1453{
1454	switch (stringset) {
1455	case ETH_SS_STATS:
1456		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1457		break;
1458	default:
1459		BUG();
1460		break;
1461	}
1462}
1463
1464static void cp_get_ethtool_stats (struct net_device *dev,
1465				  struct ethtool_stats *estats, u64 *tmp_stats)
1466{
1467	struct cp_private *cp = netdev_priv(dev);
1468	struct cp_dma_stats *nic_stats;
1469	dma_addr_t dma;
1470	int i;
1471
1472	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1473				       &dma, GFP_KERNEL);
1474	if (!nic_stats)
1475		return;
1476
1477	/* begin NIC statistics dump */
1478	cpw32(StatsAddr + 4, (u64)dma >> 32);
1479	cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1480	cpr32(StatsAddr);
1481
1482	for (i = 0; i < 1000; i++) {
1483		if ((cpr32(StatsAddr) & DumpStats) == 0)
1484			break;
1485		udelay(10);
1486	}
1487	cpw32(StatsAddr, 0);
1488	cpw32(StatsAddr + 4, 0);
1489	cpr32(StatsAddr);
1490
1491	i = 0;
1492	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1493	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1494	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1495	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1496	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1497	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1498	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1499	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1500	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1501	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1502	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1503	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1504	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1505	tmp_stats[i++] = cp->cp_stats.rx_frags;
1506	BUG_ON(i != CP_NUM_STATS);
1507
1508	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1509}
1510
1511static const struct ethtool_ops cp_ethtool_ops = {
1512	.get_drvinfo		= cp_get_drvinfo,
1513	.get_regs_len		= cp_get_regs_len,
1514	.get_sset_count		= cp_get_sset_count,
1515	.get_settings		= cp_get_settings,
1516	.set_settings		= cp_set_settings,
1517	.nway_reset		= cp_nway_reset,
1518	.get_link		= ethtool_op_get_link,
1519	.get_msglevel		= cp_get_msglevel,
1520	.set_msglevel		= cp_set_msglevel,
1521	.get_regs		= cp_get_regs,
1522	.get_wol		= cp_get_wol,
1523	.set_wol		= cp_set_wol,
1524	.get_strings		= cp_get_strings,
1525	.get_ethtool_stats	= cp_get_ethtool_stats,
1526	.get_eeprom_len		= cp_get_eeprom_len,
1527	.get_eeprom		= cp_get_eeprom,
1528	.set_eeprom		= cp_set_eeprom,
1529};
1530
1531static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1532{
1533	struct cp_private *cp = netdev_priv(dev);
1534	int rc;
1535	unsigned long flags;
1536
1537	if (!netif_running(dev))
1538		return -EINVAL;
1539
1540	spin_lock_irqsave(&cp->lock, flags);
1541	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1542	spin_unlock_irqrestore(&cp->lock, flags);
1543	return rc;
1544}
1545
1546static int cp_set_mac_address(struct net_device *dev, void *p)
1547{
1548	struct cp_private *cp = netdev_priv(dev);
1549	struct sockaddr *addr = p;
1550
1551	if (!is_valid_ether_addr(addr->sa_data))
1552		return -EADDRNOTAVAIL;
1553
1554	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1555
1556	spin_lock_irq(&cp->lock);
1557
1558	cpw8_f(Cfg9346, Cfg9346_Unlock);
1559	cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1560	cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1561	cpw8_f(Cfg9346, Cfg9346_Lock);
1562
1563	spin_unlock_irq(&cp->lock);
1564
1565	return 0;
1566}
1567
1568/* Serial EEPROM section. */
1569
1570/*  EEPROM_Ctrl bits. */
1571#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1572#define EE_CS			0x08	/* EEPROM chip select. */
1573#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1574#define EE_WRITE_0		0x00
1575#define EE_WRITE_1		0x02
1576#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1577#define EE_ENB			(0x80 | EE_CS)
1578
1579/* Delay between EEPROM clock transitions.
   1580   No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1581 */
1582
1583#define eeprom_delay()	readl(ee_addr)
1584
 1585/* The EEPROM commands include the always-set leading bit. */
1586#define EE_EXTEND_CMD	(4)
1587#define EE_WRITE_CMD	(5)
1588#define EE_READ_CMD		(6)
1589#define EE_ERASE_CMD	(7)
1590
1591#define EE_EWDS_ADDR	(0)
1592#define EE_WRAL_ADDR	(1)
1593#define EE_ERAL_ADDR	(2)
1594#define EE_EWEN_ADDR	(3)
1595
1596#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1597
1598static void eeprom_cmd_start(void __iomem *ee_addr)
1599{
1600	writeb (EE_ENB & ~EE_CS, ee_addr);
1601	writeb (EE_ENB, ee_addr);
1602	eeprom_delay ();
1603}
1604
1605static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1606{
1607	int i;
1608
1609	/* Shift the command bits out. */
1610	for (i = cmd_len - 1; i >= 0; i--) {
1611		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1612		writeb (EE_ENB | dataval, ee_addr);
1613		eeprom_delay ();
1614		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1615		eeprom_delay ();
1616	}
1617	writeb (EE_ENB, ee_addr);
1618	eeprom_delay ();
1619}
1620
1621static void eeprom_cmd_end(void __iomem *ee_addr)
1622{
1623	writeb (~EE_CS, ee_addr);
1624	eeprom_delay ();
1625}
1626
1627static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1628			      int addr_len)
1629{
1630	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1631
1632	eeprom_cmd_start(ee_addr);
1633	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1634	eeprom_cmd_end(ee_addr);
1635}
1636
1637static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1638{
1639	int i;
1640	u16 retval = 0;
1641	void __iomem *ee_addr = ioaddr + Cfg9346;
1642	int read_cmd = location | (EE_READ_CMD << addr_len);
1643
1644	eeprom_cmd_start(ee_addr);
1645	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1646
1647	for (i = 16; i > 0; i--) {
1648		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1649		eeprom_delay ();
1650		retval =
1651		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1652				     0);
1653		writeb (EE_ENB, ee_addr);
1654		eeprom_delay ();
1655	}
1656
1657	eeprom_cmd_end(ee_addr);
1658
1659	return retval;
1660}
1661
1662static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1663			 int addr_len)
1664{
1665	int i;
1666	void __iomem *ee_addr = ioaddr + Cfg9346;
1667	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1668
1669	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1670
1671	eeprom_cmd_start(ee_addr);
1672	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1673	eeprom_cmd(ee_addr, val, 16);
1674	eeprom_cmd_end(ee_addr);
1675
1676	eeprom_cmd_start(ee_addr);
1677	for (i = 0; i < 20000; i++)
1678		if (readb(ee_addr) & EE_DATA_READ)
1679			break;
1680	eeprom_cmd_end(ee_addr);
1681
1682	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1683}
1684
1685static int cp_get_eeprom_len(struct net_device *dev)
1686{
1687	struct cp_private *cp = netdev_priv(dev);
1688	int size;
1689
1690	spin_lock_irq(&cp->lock);
1691	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1692	spin_unlock_irq(&cp->lock);
1693
1694	return size;
1695}
1696
1697static int cp_get_eeprom(struct net_device *dev,
1698			 struct ethtool_eeprom *eeprom, u8 *data)
1699{
1700	struct cp_private *cp = netdev_priv(dev);
1701	unsigned int addr_len;
1702	u16 val;
1703	u32 offset = eeprom->offset >> 1;
1704	u32 len = eeprom->len;
1705	u32 i = 0;
1706
1707	eeprom->magic = CP_EEPROM_MAGIC;
1708
1709	spin_lock_irq(&cp->lock);
1710
1711	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1712
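     	/* The EEPROM is addressed in 16-bit words (offset is a word index);
     	 * an odd byte offset starts in the high byte of the first word. */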
1713	if (eeprom->offset & 1) {
1714		val = read_eeprom(cp->regs, offset, addr_len);
1715		data[i++] = (u8)(val >> 8);
1716		offset++;
1717	}
1718
1719	while (i < len - 1) {
1720		val = read_eeprom(cp->regs, offset, addr_len);
1721		data[i++] = (u8)val;
1722		data[i++] = (u8)(val >> 8);
1723		offset++;
1724	}
1725
1726	if (i < len) {
1727		val = read_eeprom(cp->regs, offset, addr_len);
1728		data[i] = (u8)val;
1729	}
1730
1731	spin_unlock_irq(&cp->lock);
1732	return 0;
1733}
1734
1735static int cp_set_eeprom(struct net_device *dev,
1736			 struct ethtool_eeprom *eeprom, u8 *data)
1737{
1738	struct cp_private *cp = netdev_priv(dev);
1739	unsigned int addr_len;
1740	u16 val;
1741	u32 offset = eeprom->offset >> 1;
1742	u32 len = eeprom->len;
1743	u32 i = 0;
1744
1745	if (eeprom->magic != CP_EEPROM_MAGIC)
1746		return -EINVAL;
1747
1748	spin_lock_irq(&cp->lock);
1749
1750	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1751
1752	if (eeprom->offset & 1) {
1753		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1754		val |= (u16)data[i++] << 8;
1755		write_eeprom(cp->regs, offset, val, addr_len);
1756		offset++;
1757	}
1758
1759	while (i < len - 1) {
1760		val = (u16)data[i++];
1761		val |= (u16)data[i++] << 8;
1762		write_eeprom(cp->regs, offset, val, addr_len);
1763		offset++;
1764	}
1765
1766	if (i < len) {
1767		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1768		val |= (u16)data[i];
1769		write_eeprom(cp->regs, offset, val, addr_len);
1770	}
1771
1772	spin_unlock_irq(&cp->lock);
1773	return 0;
1774}
1775
 1776/* Put the board into a low-power D3 state and wait for the WakeUp signal */
1777static void cp_set_d3_state (struct cp_private *cp)
1778{
1779	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1780	pci_set_power_state (cp->pdev, PCI_D3hot);
1781}
1782
1783static const struct net_device_ops cp_netdev_ops = {
1784	.ndo_open		= cp_open,
1785	.ndo_stop		= cp_close,
1786	.ndo_validate_addr	= eth_validate_addr,
1787	.ndo_set_mac_address 	= cp_set_mac_address,
1788	.ndo_set_multicast_list	= cp_set_rx_mode,
1789	.ndo_get_stats		= cp_get_stats,
1790	.ndo_do_ioctl		= cp_ioctl,
1791	.ndo_start_xmit		= cp_start_xmit,
1792	.ndo_tx_timeout		= cp_tx_timeout,
1793	.ndo_set_features	= cp_set_features,
1794#ifdef BROKEN
1795	.ndo_change_mtu		= cp_change_mtu,
1796#endif
1797
1798#ifdef CONFIG_NET_POLL_CONTROLLER
1799	.ndo_poll_controller	= cp_poll_controller,
1800#endif
1801};
1802
1803static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1804{
1805	struct net_device *dev;
1806	struct cp_private *cp;
1807	int rc;
1808	void __iomem *regs;
1809	resource_size_t pciaddr;
1810	unsigned int addr_len, i, pci_using_dac;
1811
1812#ifndef MODULE
1813	static int version_printed;
1814	if (version_printed++ == 0)
1815		pr_info("%s", version);
1816#endif
1817
1818	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1819	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1820		dev_info(&pdev->dev,
1821			 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1822			 pdev->vendor, pdev->device, pdev->revision);
1823		return -ENODEV;
1824	}
1825
1826	dev = alloc_etherdev(sizeof(struct cp_private));
1827	if (!dev)
1828		return -ENOMEM;
1829	SET_NETDEV_DEV(dev, &pdev->dev);
1830
1831	cp = netdev_priv(dev);
1832	cp->pdev = pdev;
1833	cp->dev = dev;
1834	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1835	spin_lock_init (&cp->lock);
1836	cp->mii_if.dev = dev;
1837	cp->mii_if.mdio_read = mdio_read;
1838	cp->mii_if.mdio_write = mdio_write;
1839	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1840	cp->mii_if.phy_id_mask = 0x1f;
1841	cp->mii_if.reg_num_mask = 0x1f;
1842	cp_set_rxbufsize(cp);
1843
1844	rc = pci_enable_device(pdev);
1845	if (rc)
1846		goto err_out_free;
1847
1848	rc = pci_set_mwi(pdev);
1849	if (rc)
1850		goto err_out_disable;
1851
1852	rc = pci_request_regions(pdev, DRV_NAME);
1853	if (rc)
1854		goto err_out_mwi;
1855
1856	pciaddr = pci_resource_start(pdev, 1);
1857	if (!pciaddr) {
1858		rc = -EIO;
1859		dev_err(&pdev->dev, "no MMIO resource\n");
1860		goto err_out_res;
1861	}
1862	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1863		rc = -EIO;
1864		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1865		       (unsigned long long)pci_resource_len(pdev, 1));
1866		goto err_out_res;
1867	}
1868
1869	/* Configure DMA attributes. */
1870	if ((sizeof(dma_addr_t) > 4) &&
1871	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1872	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1873		pci_using_dac = 1;
1874	} else {
1875		pci_using_dac = 0;
1876
1877		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1878		if (rc) {
1879			dev_err(&pdev->dev,
1880				"No usable DMA configuration, aborting\n");
1881			goto err_out_res;
1882		}
1883		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1884		if (rc) {
1885			dev_err(&pdev->dev,
1886				"No usable consistent DMA configuration, aborting\n");
1887			goto err_out_res;
1888		}
1889	}
1890
1891	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1892		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1893
1894	dev->features |= NETIF_F_RXCSUM;
1895	dev->hw_features |= NETIF_F_RXCSUM;
1896
1897	regs = ioremap(pciaddr, CP_REGS_SIZE);
1898	if (!regs) {
1899		rc = -EIO;
1900		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1901			(unsigned long long)pci_resource_len(pdev, 1),
1902		       (unsigned long long)pciaddr);
1903		goto err_out_res;
1904	}
1905	dev->base_addr = (unsigned long) regs;
1906	cp->regs = regs;
1907
1908	cp_stop_hw(cp);
1909
1910	/* read MAC address from EEPROM */
1911	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1912	for (i = 0; i < 3; i++)
1913		((__le16 *) (dev->dev_addr))[i] =
1914		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1915	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1916
1917	dev->netdev_ops = &cp_netdev_ops;
1918	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1919	dev->ethtool_ops = &cp_ethtool_ops;
1920	dev->watchdog_timeo = TX_TIMEOUT;
1921
1922	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1923
1924	if (pci_using_dac)
1925		dev->features |= NETIF_F_HIGHDMA;
1926
1927	/* disabled by default until verified */
1928	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1929		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1930	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1931		NETIF_F_HIGHDMA;
1932
1933	dev->irq = pdev->irq;
1934
1935	rc = register_netdev(dev);
1936	if (rc)
1937		goto err_out_iomap;
1938
1939	netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1940		    dev->base_addr, dev->dev_addr, dev->irq);
1941
1942	pci_set_drvdata(pdev, dev);
1943
1944	/* enable busmastering and memory-write-invalidate */
1945	pci_set_master(pdev);
1946
1947	if (cp->wol_enabled)
1948		cp_set_d3_state (cp);
1949
1950	return 0;
1951
1952err_out_iomap:
1953	iounmap(regs);
1954err_out_res:
1955	pci_release_regions(pdev);
1956err_out_mwi:
1957	pci_clear_mwi(pdev);
1958err_out_disable:
1959	pci_disable_device(pdev);
1960err_out_free:
1961	free_netdev(dev);
1962	return rc;
1963}
1964
1965static void cp_remove_one (struct pci_dev *pdev)
1966{
1967	struct net_device *dev = pci_get_drvdata(pdev);
1968	struct cp_private *cp = netdev_priv(dev);
1969
1970	unregister_netdev(dev);
1971	iounmap(cp->regs);
1972	if (cp->wol_enabled)
1973		pci_set_power_state (pdev, PCI_D0);
1974	pci_release_regions(pdev);
1975	pci_clear_mwi(pdev);
1976	pci_disable_device(pdev);
1977	pci_set_drvdata(pdev, NULL);
1978	free_netdev(dev);
1979}
1980
1981#ifdef CONFIG_PM
1982static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1983{
1984	struct net_device *dev = pci_get_drvdata(pdev);
1985	struct cp_private *cp = netdev_priv(dev);
1986	unsigned long flags;
1987
1988	if (!netif_running(dev))
1989		return 0;
1990
1991	netif_device_detach (dev);
1992	netif_stop_queue (dev);
1993
1994	spin_lock_irqsave (&cp->lock, flags);
1995
1996	/* Disable Rx and Tx */
1997	cpw16 (IntrMask, 0);
 1998	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
1999
2000	spin_unlock_irqrestore (&cp->lock, flags);
2001
2002	pci_save_state(pdev);
2003	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2004	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2005
2006	return 0;
2007}
2008
2009static int cp_resume (struct pci_dev *pdev)
2010{
2011	struct net_device *dev = pci_get_drvdata (pdev);
2012	struct cp_private *cp = netdev_priv(dev);
2013	unsigned long flags;
2014
2015	if (!netif_running(dev))
2016		return 0;
2017
2018	netif_device_attach (dev);
2019
2020	pci_set_power_state(pdev, PCI_D0);
2021	pci_restore_state(pdev);
2022	pci_enable_wake(pdev, PCI_D0, 0);
2023
2024	/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
2025	cp_init_rings_index (cp);
2026	cp_init_hw (cp);
2027	netif_start_queue (dev);
2028
2029	spin_lock_irqsave (&cp->lock, flags);
2030
2031	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2032
2033	spin_unlock_irqrestore (&cp->lock, flags);
2034
2035	return 0;
2036}
2037#endif /* CONFIG_PM */
2038
2039static struct pci_driver cp_driver = {
2040	.name         = DRV_NAME,
2041	.id_table     = cp_pci_tbl,
2042	.probe        =	cp_init_one,
2043	.remove       = cp_remove_one,
2044#ifdef CONFIG_PM
2045	.resume       = cp_resume,
2046	.suspend      = cp_suspend,
2047#endif
2048};
2049
2050static int __init cp_init (void)
2051{
2052#ifdef MODULE
2053	pr_info("%s", version);
2054#endif
2055	return pci_register_driver(&cp_driver);
2056}
2057
2058static void __exit cp_exit (void)
2059{
2060	pci_unregister_driver (&cp_driver);
2061}
2062
2063module_init(cp_init);
2064module_exit(cp_exit);