   1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
   2/*
   3	Written 1998-2001 by Donald Becker.
   4
   5	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>
   6
   7	This software may be used and distributed according to the terms of
   8	the GNU General Public License (GPL), incorporated herein by reference.
   9	Drivers based on or derived from this code fall under the GPL and must
  10	retain the authorship, copyright and license notice.  This file is not
  11	a complete program and may only be used when the entire operating
  12	system is licensed under the GPL.
  13
  14	This driver is designed for the VIA VT86C100A Rhine-I.
  15	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
  16	and management NIC 6105M).
  17
  18	The author may be reached as becker@scyld.com, or C/O
  19	Scyld Computing Corporation
  20	410 Severn Ave., Suite 210
  21	Annapolis MD 21403
  22
  23
  24	This driver contains some changes from the original Donald Becker
  25	version. He may or may not be interested in bug reports on this
  26	code. You can find his versions at:
  27	http://www.scyld.com/network/via-rhine.html
  28	[link no longer provides useful info -jgarzik]
  29
  30*/
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#define DRV_NAME	"via-rhine"
  35
  36#include <linux/types.h>
  37
  38/* A few user-configurable values.
  39   These may be modified when a driver module is loaded. */
  40static int debug = 0;
  41#define RHINE_MSG_DEFAULT \
  42        (0x0000)
  43
  44/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  45   Setting to > 1518 effectively disables this feature. */
  46#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  47	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
  48	defined(__sh__) || defined(__mips__)
  49static int rx_copybreak = 1518;
  50#else
  51static int rx_copybreak;
  52#endif
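/*
 * Note on the defaults above: on the listed architectures unaligned
 * accesses are costly (or trap), so every frame up to 1518 bytes is
 * copied, realigning the IP header in the process; elsewhere the default
 * of 0 never copies. See "IIIb/c. Transmit/Receive Structure" in the
 * Theory of Operation comment below.
 */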
  53
  54/* Work-around for broken BIOSes: they are unable to get the chip back out of
  55   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
  56static bool avoid_D3;
  57
  58/*
  59 * In case you are looking for 'options[]' or 'full_duplex[]', they
  60 * are gone. Use ethtool(8) instead.
  61 */
  62
  63/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  64   The Rhine has a 64 element 8390-like hash table. */
  65static const int multicast_filter_limit = 32;
  66
  67
  68/* Operational parameters that are set at compile time. */
  69
  70/* Keep the ring sizes a power of two for compile efficiency.
  71 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  72 * Making the Tx ring too large decreases the effectiveness of channel
  73 * bonding and packet priority.
  74 * With BQL support, we can increase TX ring safely.
  75 * There are no ill effects from too-large receive rings.
  76 */
  77#define TX_RING_SIZE	64
  78#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
  79#define RX_RING_SIZE	64
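/*
 * Illustrative note: with power-of-two ring sizes, the index arithmetic
 * used throughout this driver, e.g.
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * compiles down to a simple mask: rp->cur_tx & (TX_RING_SIZE - 1).
 */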
  80
  81/* Operational parameters that usually are not changed. */
  82
  83/* Time in jiffies before concluding the transmitter is hung. */
  84#define TX_TIMEOUT	(2*HZ)
  85
  86#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
  87
  88#include <linux/module.h>
  89#include <linux/moduleparam.h>
  90#include <linux/kernel.h>
  91#include <linux/string.h>
  92#include <linux/timer.h>
  93#include <linux/errno.h>
  94#include <linux/ioport.h>
  95#include <linux/interrupt.h>
  96#include <linux/pci.h>
  97#include <linux/of.h>
  98#include <linux/of_irq.h>
  99#include <linux/platform_device.h>
 100#include <linux/dma-mapping.h>
 101#include <linux/netdevice.h>
 102#include <linux/etherdevice.h>
 103#include <linux/skbuff.h>
 104#include <linux/init.h>
 105#include <linux/delay.h>
 106#include <linux/mii.h>
 107#include <linux/ethtool.h>
 108#include <linux/crc32.h>
 109#include <linux/if_vlan.h>
 110#include <linux/bitops.h>
 111#include <linux/workqueue.h>
 112#include <asm/processor.h>	/* Processor type for cache alignment. */
 113#include <asm/io.h>
 114#include <asm/irq.h>
 115#include <linux/uaccess.h>
 116#include <linux/dmi.h>
 117
 118MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 119MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 120MODULE_LICENSE("GPL");
 121
 122module_param(debug, int, 0);
 123module_param(rx_copybreak, int, 0);
 124module_param(avoid_D3, bool, 0);
 125MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 126MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 127MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 128
 129#define MCAM_SIZE	32
 130#define VCAM_SIZE	32
 131
 132/*
 133		Theory of Operation
 134
 135I. Board Compatibility
 136
  137This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
  138controller (and also supports the later Rhine-II and Rhine-III chips).
 139
 140II. Board-specific settings
 141
 142Boards with this chip are functional only in a bus-master PCI slot.
 143
 144Many operational settings are loaded from the EEPROM to the Config word at
 145offset 0x78. For most of these settings, this driver assumes that they are
 146correct.
 147If this driver is compiled to use PCI memory space operations the EEPROM
 148must be configured to enable memory ops.
 149
 150III. Driver operation
 151
 152IIIa. Ring buffers
 153
 154This driver uses two statically allocated fixed-size descriptor lists
 155formed into rings by a branch from the final descriptor to the beginning of
 156the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
 157
 158IIIb/c. Transmit/Receive Structure
 159
 160This driver attempts to use a zero-copy receive and transmit scheme.
 161
 162Alas, all data buffers are required to start on a 32 bit boundary, so
 163the driver must often copy transmit packets into bounce buffers.
 164
 165The driver allocates full frame size skbuffs for the Rx ring buffers at
 166open() time and passes the skb->data field to the chip as receive data
 167buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
 168a fresh skbuff is allocated and the frame is copied to the new skbuff.
 169When the incoming frame is larger, the skbuff is passed directly up the
 170protocol stack. Buffers consumed this way are replaced by newly allocated
 171skbuffs in the last phase of rhine_rx().
 172
 173The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 174using a full-sized skbuff for small frames vs. the copying costs of larger
 175frames. New boards are typically used in generously configured machines
 176and the underfilled buffers have negligible impact compared to the benefit of
 177a single allocation size, so the default value of zero results in never
 178copying packets. When copying is done, the cost is usually mitigated by using
 179a combined copy/checksum routine. Copying also preloads the cache, which is
 180most useful with small frames.
 181
 182Since the VIA chips are only able to transfer data to buffers on 32 bit
 183boundaries, the IP header at offset 14 in an ethernet frame isn't
 184longword aligned for further processing. Copying these unaligned buffers
 185has the beneficial effect of 16-byte aligning the IP header.
 186
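As a minimal sketch (helper calls illustrative, not the exact code of
rhine_rx()), the receive path decides roughly as follows:

	if (pkt_len < rx_copybreak) {
		/* Copy into a small fresh skb, leave the ring skb in place */
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
	} else {
		/* Pass the full-sized ring skb up and refill the slot */
		skb = ring_skb;
	}
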
 187IIId. Synchronization
 188
 189The driver runs as two independent, single-threaded flows of control. One
 190is the send-packet routine, which enforces single-threaded use by the
 191netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
 192which is single threaded by the hardware and interrupt handling software.
 193
 194The send packet thread has partial control over the Tx ring. It locks the
 195netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
 196the ring is not available it stops the transmit queue by
 197calling netif_stop_queue.
 198
  199The interrupt handler has exclusive control over the Rx ring and records stats
  200from the Tx ring. After reaping the stats, it marks the Tx queue entry as
  201empty by incrementing the dirty_tx mark. If at least half of the entries in
  202the Tx ring are available the transmit queue is woken up if it was stopped.
 203
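In code terms, a simplified sketch of this accounting (the driver's
actual check lives in rhine_tx_queue_full()):

	used = rp->cur_tx - rp->dirty_tx;	/* descriptors in flight */
	if (used >= TX_QUEUE_LEN)
		netif_stop_queue(dev);		/* ring is full */
	/* ... interrupt handler reaps entries, advancing dirty_tx ... */
	if (netif_queue_stopped(dev) && used < TX_QUEUE_LEN)
		netif_wake_queue(dev);
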
 204IV. Notes
 205
 206IVb. References
 207
 208Preliminary VT86C100A manual from http://www.via.com.tw/
 209http://www.scyld.com/expert/100mbps.html
 210http://www.scyld.com/expert/NWay.html
 211ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
 212ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
 213
 214
 215IVc. Errata
 216
  217The VT86C100A manual is not a reliable source of information.
 218The 3043 chip does not handle unaligned transmit or receive buffers, resulting
 219in significant performance degradation for bounce buffer copies on transmit
 220and unaligned IP headers on receive.
 221The chip does not pad to minimum transmit length.
 222
 223*/
 224
 225
 226/* This table drives the PCI probe routines. It's mostly boilerplate in all
 227   of the drivers, and will likely be provided by some future kernel.
  228   Note the matching code -- the first table entry matches all 56** cards but
  229   the second only the 1234 card.
 230*/
 231
 232enum rhine_revs {
 233	VT86C100A	= 0x00,
 234	VTunknown0	= 0x20,
 235	VT6102		= 0x40,
 236	VT8231		= 0x50,	/* Integrated MAC */
 237	VT8233		= 0x60,	/* Integrated MAC */
 238	VT8235		= 0x74,	/* Integrated MAC */
 239	VT8237		= 0x78,	/* Integrated MAC */
 240	VT8251		= 0x7C,	/* Integrated MAC */
 241	VT6105		= 0x80,
 242	VT6105_B0	= 0x83,
 243	VT6105L		= 0x8A,
 244	VT6107		= 0x8C,
 245	VTunknown2	= 0x8E,
 246	VT6105M		= 0x90,	/* Management adapter */
 247};
 248
 249enum rhine_quirks {
 250	rqWOL		= 0x0001,	/* Wake-On-LAN support */
 251	rqForceReset	= 0x0002,
 252	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
 253	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
 254	rqRhineI	= 0x0100,	/* See comment below */
 255	rqIntPHY	= 0x0200,	/* Integrated PHY */
 256	rqMgmt		= 0x0400,	/* Management adapter */
 257	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
 258					 * switched from PIO mode to MMIO
 259					 * (only applies to PCI)
 260					 */
 261};
 262/*
 263 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 264 * MMIO as well as for the collision counter and the Tx FIFO underflow
  265 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 266 */
 267
  268/* Beware of PCI posted writes: a read from the chip forces them to complete */
 269#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
 270
 271static const struct pci_device_id rhine_pci_tbl[] = {
 272	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
 273	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
 274	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
 275	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
 276	{ }	/* terminate list */
 277};
 278MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 279
 280/* OpenFirmware identifiers for platform-bus devices
 281 * The .data field is currently only used to store quirks
 282 */
 283static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
 284static const struct of_device_id rhine_of_tbl[] = {
 285	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
 286	{ }	/* terminate list */
 287};
 288MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 289
 290/* Offsets to the device registers. */
 291enum register_offsets {
 292	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
 293	ChipCmd1=0x09, TQWake=0x0A,
 294	IntrStatus=0x0C, IntrEnable=0x0E,
 295	MulticastFilter0=0x10, MulticastFilter1=0x14,
 296	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
 297	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 298	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 299	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 300	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 301	StickyHW=0x83, IntrStatus2=0x84,
 302	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 303	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 304	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 305	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
 306};
 307
 308/* Bits in ConfigD */
 309enum backoff_bits {
 310	BackOptional=0x01, BackModify=0x02,
 311	BackCaptureEffect=0x04, BackRandom=0x08
 312};
 313
 314/* Bits in the TxConfig (TCR) register */
 315enum tcr_bits {
 316	TCR_PQEN=0x01,
 317	TCR_LB0=0x02,		/* loopback[0] */
 318	TCR_LB1=0x04,		/* loopback[1] */
 319	TCR_OFSET=0x08,
 320	TCR_RTGOPT=0x10,
 321	TCR_RTFT0=0x20,
 322	TCR_RTFT1=0x40,
 323	TCR_RTSF=0x80,
 324};
 325
 326/* Bits in the CamCon (CAMC) register */
 327enum camcon_bits {
 328	CAMC_CAMEN=0x01,
 329	CAMC_VCAMSL=0x02,
 330	CAMC_CAMWR=0x04,
 331	CAMC_CAMRD=0x08,
 332};
 333
 334/* Bits in the PCIBusConfig1 (BCR1) register */
 335enum bcr1_bits {
 336	BCR1_POT0=0x01,
 337	BCR1_POT1=0x02,
 338	BCR1_POT2=0x04,
 339	BCR1_CTFT0=0x08,
 340	BCR1_CTFT1=0x10,
 341	BCR1_CTSF=0x20,
 342	BCR1_TXQNOBK=0x40,	/* for VT6105 */
 343	BCR1_VIDFR=0x80,	/* for VT6105 */
 344	BCR1_MED0=0x40,		/* for VT6102 */
 345	BCR1_MED1=0x80,		/* for VT6102 */
 346};
 347
  348/* Registers we check to verify that MMIO and PIO accesses match. */
 349static const int mmio_verify_registers[] = {
 350	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 351	0
 352};
 353
 354/* Bits in the interrupt status/mask registers. */
 355enum intr_status_bits {
 356	IntrRxDone	= 0x0001,
 357	IntrTxDone	= 0x0002,
 358	IntrRxErr	= 0x0004,
 359	IntrTxError	= 0x0008,
 360	IntrRxEmpty	= 0x0020,
 361	IntrPCIErr	= 0x0040,
 362	IntrStatsMax	= 0x0080,
 363	IntrRxEarly	= 0x0100,
 364	IntrTxUnderrun	= 0x0210,
 365	IntrRxOverflow	= 0x0400,
 366	IntrRxDropped	= 0x0800,
 367	IntrRxNoBuf	= 0x1000,
 368	IntrTxAborted	= 0x2000,
 369	IntrLinkChange	= 0x4000,
 370	IntrRxWakeUp	= 0x8000,
 371	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
 372	IntrNormalSummary	= IntrRxDone | IntrTxDone,
 373	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
 374				  IntrTxUnderrun,
 375};
 376
 377/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
 378enum wol_bits {
 379	WOLucast	= 0x10,
 380	WOLmagic	= 0x20,
 381	WOLbmcast	= 0x30,
 382	WOLlnkon	= 0x40,
 383	WOLlnkoff	= 0x80,
 384};
 385
 386/* The Rx and Tx buffer descriptors. */
 387struct rx_desc {
 388	__le32 rx_status;
 389	__le32 desc_length; /* Chain flag, Buffer/frame length */
 390	__le32 addr;
 391	__le32 next_desc;
 392};
 393struct tx_desc {
 394	__le32 tx_status;
 395	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
 396	__le32 addr;
 397	__le32 next_desc;
 398};
 399
 400/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
 401#define TXDESC		0x00e08000
 402
 403enum rx_status_bits {
 404	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
 405};
 406
 407/* Bits in *_desc.*_status */
 408enum desc_status_bits {
 409	DescOwn=0x80000000
 410};
 411
 412/* Bits in *_desc.*_length */
 413enum desc_length_bits {
 414	DescTag=0x00010000
 415};
 416
 417/* Bits in ChipCmd. */
 418enum chip_cmd_bits {
 419	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
 420	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
 421	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
 422	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
 423};
 424
 425struct rhine_stats {
 426	u64		packets;
 427	u64		bytes;
 428	struct u64_stats_sync syncp;
 429};
 430
 431struct rhine_private {
 432	/* Bit mask for configured VLAN ids */
 433	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 434
 435	/* Descriptor rings */
 436	struct rx_desc *rx_ring;
 437	struct tx_desc *tx_ring;
 438	dma_addr_t rx_ring_dma;
 439	dma_addr_t tx_ring_dma;
 440
 441	/* The addresses of receive-in-place skbuffs. */
 442	struct sk_buff *rx_skbuff[RX_RING_SIZE];
 443	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
 444
 445	/* The saved address of a sent-in-place packet/buffer, for later free(). */
 446	struct sk_buff *tx_skbuff[TX_RING_SIZE];
 447	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
 448
 449	/* Tx bounce buffers (Rhine-I only) */
 450	unsigned char *tx_buf[TX_RING_SIZE];
 451	unsigned char *tx_bufs;
 452	dma_addr_t tx_bufs_dma;
 453
 454	int irq;
 455	long pioaddr;
 456	struct net_device *dev;
 457	struct napi_struct napi;
 458	spinlock_t lock;
 459	struct mutex task_lock;
 460	bool task_enable;
 461	struct work_struct slow_event_task;
 462	struct work_struct reset_task;
 463
 464	u32 msg_enable;
 465
 466	/* Frequently used values: keep some adjacent for cache effect. */
 467	u32 quirks;
 468	unsigned int cur_rx;
 469	unsigned int cur_tx, dirty_tx;
 470	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
 471	struct rhine_stats rx_stats;
 472	struct rhine_stats tx_stats;
 473	u8 wolopts;
 474
 475	u8 tx_thresh, rx_thresh;
 476
 477	struct mii_if_info mii_if;
 478	void __iomem *base;
 479};
 480
 481#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
 482#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
 483#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
 484
 485#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
 486#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
 487#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
 488
 489#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
 490#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
 491#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
 492
 493#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
 494#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
 495#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
 496
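/*
 * Usage sketch for the helpers above, e.g. toggling full duplex in
 * ChipCmd1 with a single read-modify-write:
 *
 *	BYTE_REG_BITS_ON(Cmd1FDuplex, ioaddr + ChipCmd1);
 *	BYTE_REG_BITS_OFF(Cmd1FDuplex, ioaddr + ChipCmd1);
 */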
 497
 498static int  mdio_read(struct net_device *dev, int phy_id, int location);
 499static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 500static int  rhine_open(struct net_device *dev);
 501static void rhine_reset_task(struct work_struct *work);
 502static void rhine_slow_event_task(struct work_struct *work);
 503static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
 504static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 505				  struct net_device *dev);
 506static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 507static void rhine_tx(struct net_device *dev);
 508static int rhine_rx(struct net_device *dev, int limit);
 509static void rhine_set_rx_mode(struct net_device *dev);
 510static void rhine_get_stats64(struct net_device *dev,
 511			      struct rtnl_link_stats64 *stats);
 512static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 513static const struct ethtool_ops netdev_ethtool_ops;
 514static int  rhine_close(struct net_device *dev);
 515static int rhine_vlan_rx_add_vid(struct net_device *dev,
 516				 __be16 proto, u16 vid);
 517static int rhine_vlan_rx_kill_vid(struct net_device *dev,
 518				  __be16 proto, u16 vid);
 519static void rhine_restart_tx(struct net_device *dev);
 520
 521static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
 522{
 523	void __iomem *ioaddr = rp->base;
 524	int i;
 525
 526	for (i = 0; i < 1024; i++) {
 527		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
 528
 529		if (low ^ has_mask_bits)
 530			break;
 531		udelay(10);
 532	}
 533	if (i > 64) {
 534		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
 535			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
 536	}
 537}
 538
 539static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
 540{
 541	rhine_wait_bit(rp, reg, mask, false);
 542}
 543
 544static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
 545{
 546	rhine_wait_bit(rp, reg, mask, true);
 547}
 548
 549static u32 rhine_get_events(struct rhine_private *rp)
 550{
 551	void __iomem *ioaddr = rp->base;
 552	u32 intr_status;
 553
 554	intr_status = ioread16(ioaddr + IntrStatus);
 555	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
 556	if (rp->quirks & rqStatusWBRace)
 557		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 558	return intr_status;
 559}
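/*
 * Example of the mapping above: bit 3 (0x08) of IntrStatus2, shifted
 * left by 16, appears as 0x080000 in the combined word -- IntrTxDescRace.
 */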
 560
 561static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 562{
 563	void __iomem *ioaddr = rp->base;
 564
 565	if (rp->quirks & rqStatusWBRace)
 566		iowrite8(mask >> 16, ioaddr + IntrStatus2);
 567	iowrite16(mask, ioaddr + IntrStatus);
 568}
 569
 570/*
 571 * Get power related registers into sane state.
 572 * Notify user about past WOL event.
 573 */
 574static void rhine_power_init(struct net_device *dev)
 575{
 576	struct rhine_private *rp = netdev_priv(dev);
 577	void __iomem *ioaddr = rp->base;
 578	u16 wolstat;
 579
 580	if (rp->quirks & rqWOL) {
 581		/* Make sure chip is in power state D0 */
 582		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
 583
 584		/* Disable "force PME-enable" */
 585		iowrite8(0x80, ioaddr + WOLcgClr);
 586
 587		/* Clear power-event config bits (WOL) */
 588		iowrite8(0xFF, ioaddr + WOLcrClr);
 589		/* More recent cards can manage two additional patterns */
 590		if (rp->quirks & rq6patterns)
 591			iowrite8(0x03, ioaddr + WOLcrClr1);
 592
 593		/* Save power-event status bits */
 594		wolstat = ioread8(ioaddr + PwrcsrSet);
 595		if (rp->quirks & rq6patterns)
 596			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
 597
 598		/* Clear power-event status bits */
 599		iowrite8(0xFF, ioaddr + PwrcsrClr);
 600		if (rp->quirks & rq6patterns)
 601			iowrite8(0x03, ioaddr + PwrcsrClr1);
 602
 603		if (wolstat) {
 604			char *reason;
 605			switch (wolstat) {
 606			case WOLmagic:
 607				reason = "Magic packet";
 608				break;
 609			case WOLlnkon:
 610				reason = "Link went up";
 611				break;
 612			case WOLlnkoff:
 613				reason = "Link went down";
 614				break;
 615			case WOLucast:
 616				reason = "Unicast packet";
 617				break;
 618			case WOLbmcast:
 619				reason = "Multicast/broadcast packet";
 620				break;
 621			default:
 622				reason = "Unknown";
 623			}
 624			netdev_info(dev, "Woke system up. Reason: %s\n",
 625				    reason);
 626		}
 627	}
 628}
 629
 630static void rhine_chip_reset(struct net_device *dev)
 631{
 632	struct rhine_private *rp = netdev_priv(dev);
 633	void __iomem *ioaddr = rp->base;
 634	u8 cmd1;
 635
 636	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 637	IOSYNC;
 638
 639	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
 640		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
 641
 642		/* Force reset */
 643		if (rp->quirks & rqForceReset)
 644			iowrite8(0x40, ioaddr + MiscCmd);
 645
 646		/* Reset can take somewhat longer (rare) */
 647		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 648	}
 649
 650	cmd1 = ioread8(ioaddr + ChipCmd1);
 651	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
 652		   "failed" : "succeeded");
 653}
 654
 655static void enable_mmio(long pioaddr, u32 quirks)
 656{
 657	int n;
 658
 659	if (quirks & rqNeedEnMMIO) {
 660		if (quirks & rqRhineI) {
 661			/* More recent docs say that this bit is reserved */
 662			n = inb(pioaddr + ConfigA) | 0x20;
 663			outb(n, pioaddr + ConfigA);
 664		} else {
 665			n = inb(pioaddr + ConfigD) | 0x80;
 666			outb(n, pioaddr + ConfigD);
 667		}
 668	}
 669}
 670
 671static inline int verify_mmio(struct device *hwdev,
 672			      long pioaddr,
 673			      void __iomem *ioaddr,
 674			      u32 quirks)
 675{
 676	if (quirks & rqNeedEnMMIO) {
 677		int i = 0;
 678
 679		/* Check that selected MMIO registers match the PIO ones */
 680		while (mmio_verify_registers[i]) {
 681			int reg = mmio_verify_registers[i++];
 682			unsigned char a = inb(pioaddr+reg);
 683			unsigned char b = readb(ioaddr+reg);
 684
 685			if (a != b) {
 686				dev_err(hwdev,
  687					"MMIO does not match PIO [%02x] (%02x != %02x)\n",
 688					reg, a, b);
 689				return -EIO;
 690			}
 691		}
 692	}
 693	return 0;
 694}
 695
 696/*
 697 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 698 * (plus 0x6C for Rhine-I/II)
 699 */
 700static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 701{
 702	struct rhine_private *rp = netdev_priv(dev);
 703	void __iomem *ioaddr = rp->base;
 704	int i;
 705
 706	outb(0x20, pioaddr + MACRegEEcsr);
 707	for (i = 0; i < 1024; i++) {
 708		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
 709			break;
 710	}
 711	if (i > 512)
 712		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 713
 714	/*
 715	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 716	 * MMIO. If reloading EEPROM was done first this could be avoided, but
 717	 * it is not known if that still works with the "win98-reboot" problem.
 718	 */
 719	enable_mmio(pioaddr, rp->quirks);
 720
 721	/* Turn off EEPROM-controlled wake-up (magic packet) */
 722	if (rp->quirks & rqWOL)
 723		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
 724
 725}
 726
 727#ifdef CONFIG_NET_POLL_CONTROLLER
 728static void rhine_poll(struct net_device *dev)
 729{
 730	struct rhine_private *rp = netdev_priv(dev);
 731	const int irq = rp->irq;
 732
 733	disable_irq(irq);
 734	rhine_interrupt(irq, dev);
 735	enable_irq(irq);
 736}
 737#endif
 738
 739static void rhine_kick_tx_threshold(struct rhine_private *rp)
 740{
 741	if (rp->tx_thresh < 0xe0) {
 742		void __iomem *ioaddr = rp->base;
 743
 744		rp->tx_thresh += 0x20;
 745		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 746	}
 747}
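/*
 * Each underrun bumps the threshold by 0x20; starting from the 0x20
 * programmed in init_registers() the sequence is 0x40, 0x60, ... 0xe0.
 */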
 748
 749static void rhine_tx_err(struct rhine_private *rp, u32 status)
 750{
 751	struct net_device *dev = rp->dev;
 752
 753	if (status & IntrTxAborted) {
 754		netif_info(rp, tx_err, dev,
 755			   "Abort %08x, frame dropped\n", status);
 756	}
 757
 758	if (status & IntrTxUnderrun) {
 759		rhine_kick_tx_threshold(rp);
  760		netif_info(rp, tx_err, dev, "Transmitter underrun, "
 761			   "Tx threshold now %02x\n", rp->tx_thresh);
 762	}
 763
 764	if (status & IntrTxDescRace)
 765		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
 766
 767	if ((status & IntrTxError) &&
 768	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
 769		rhine_kick_tx_threshold(rp);
 770		netif_info(rp, tx_err, dev, "Unspecified error. "
 771			   "Tx threshold now %02x\n", rp->tx_thresh);
 772	}
 773
 774	rhine_restart_tx(dev);
 775}
 776
  777static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
 778{
 779	void __iomem *ioaddr = rp->base;
 780	struct net_device_stats *stats = &rp->dev->stats;
 781
 782	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
 783	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
 784
 785	/*
 786	 * Clears the "tally counters" for CRC errors and missed frames(?).
 787	 * It has been reported that some chips need a write of 0 to clear
 788	 * these, for others the counters are set to 1 when written to and
 789	 * instead cleared when read. So we clear them both ways ...
 790	 */
 791	iowrite32(0, ioaddr + RxMissed);
 792	ioread16(ioaddr + RxCRCErrs);
 793	ioread16(ioaddr + RxMissed);
 794}
 795
 796#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
 797				 IntrRxErr | \
 798				 IntrRxEmpty | \
 799				 IntrRxOverflow	| \
 800				 IntrRxDropped | \
 801				 IntrRxNoBuf | \
 802				 IntrRxWakeUp)
 803
 804#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
 805				 IntrTxAborted | \
 806				 IntrTxUnderrun | \
 807				 IntrTxDescRace)
 808#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
 809
 810#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
 811				 RHINE_EVENT_NAPI_TX | \
 812				 IntrStatsMax)
 813#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
 814#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
 815
 816static int rhine_napipoll(struct napi_struct *napi, int budget)
 817{
 818	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 819	struct net_device *dev = rp->dev;
 820	void __iomem *ioaddr = rp->base;
 821	u16 enable_mask = RHINE_EVENT & 0xffff;
 822	int work_done = 0;
 823	u32 status;
 824
 825	status = rhine_get_events(rp);
 826	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
 827
 828	if (status & RHINE_EVENT_NAPI_RX)
 829		work_done += rhine_rx(dev, budget);
 830
 831	if (status & RHINE_EVENT_NAPI_TX) {
 832		if (status & RHINE_EVENT_NAPI_TX_ERR) {
  833			/* Avoid scavenging before the Tx engine is turned off */
 834			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
 835			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
 836				netif_warn(rp, tx_err, dev, "Tx still on\n");
 837		}
 838
 839		rhine_tx(dev);
 840
 841		if (status & RHINE_EVENT_NAPI_TX_ERR)
 842			rhine_tx_err(rp, status);
 843	}
 844
 845	if (status & IntrStatsMax) {
 846		spin_lock(&rp->lock);
  847		rhine_update_rx_crc_and_missed_errors(rp);
 848		spin_unlock(&rp->lock);
 849	}
 850
 851	if (status & RHINE_EVENT_SLOW) {
 852		enable_mask &= ~RHINE_EVENT_SLOW;
 853		schedule_work(&rp->slow_event_task);
 854	}
 855
 856	if (work_done < budget) {
 857		napi_complete_done(napi, work_done);
 858		iowrite16(enable_mask, ioaddr + IntrEnable);
 859	}
 860	return work_done;
 861}
 862
 863static void rhine_hw_init(struct net_device *dev, long pioaddr)
 864{
 865	struct rhine_private *rp = netdev_priv(dev);
 866
 867	/* Reset the chip to erase previous misconfiguration. */
 868	rhine_chip_reset(dev);
 869
 870	/* Rhine-I needs extra time to recuperate before EEPROM reload */
 871	if (rp->quirks & rqRhineI)
 872		msleep(5);
 873
 874	/* Reload EEPROM controlled bytes cleared by soft reset */
 875	if (dev_is_pci(dev->dev.parent))
 876		rhine_reload_eeprom(pioaddr, dev);
 877}
 878
 879static const struct net_device_ops rhine_netdev_ops = {
 880	.ndo_open		 = rhine_open,
 881	.ndo_stop		 = rhine_close,
 882	.ndo_start_xmit		 = rhine_start_tx,
 883	.ndo_get_stats64	 = rhine_get_stats64,
 884	.ndo_set_rx_mode	 = rhine_set_rx_mode,
 885	.ndo_validate_addr	 = eth_validate_addr,
 886	.ndo_set_mac_address 	 = eth_mac_addr,
 887	.ndo_eth_ioctl		 = netdev_ioctl,
 888	.ndo_tx_timeout 	 = rhine_tx_timeout,
 889	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
 890	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
 891#ifdef CONFIG_NET_POLL_CONTROLLER
 892	.ndo_poll_controller	 = rhine_poll,
 893#endif
 894};
 895
 896static int rhine_init_one_common(struct device *hwdev, u32 quirks,
 897				 long pioaddr, void __iomem *ioaddr, int irq)
 898{
 899	struct net_device *dev;
 900	struct rhine_private *rp;
 901	int i, rc, phy_id;
 902	u8 addr[ETH_ALEN];
 903	const char *name;
 904
 905	/* this should always be supported */
 906	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 907	if (rc) {
 908		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
 909		goto err_out;
 910	}
 911
 912	dev = alloc_etherdev(sizeof(struct rhine_private));
 913	if (!dev) {
 914		rc = -ENOMEM;
 915		goto err_out;
 916	}
 917	SET_NETDEV_DEV(dev, hwdev);
 918
 919	rp = netdev_priv(dev);
 920	rp->dev = dev;
 921	rp->quirks = quirks;
 922	rp->pioaddr = pioaddr;
 923	rp->base = ioaddr;
 924	rp->irq = irq;
 925	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 926
 927	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 928
 929	u64_stats_init(&rp->tx_stats.syncp);
 930	u64_stats_init(&rp->rx_stats.syncp);
 931
 932	/* Get chip registers into a sane state */
 933	rhine_power_init(dev);
 934	rhine_hw_init(dev, pioaddr);
 935
 936	for (i = 0; i < 6; i++)
 937		addr[i] = ioread8(ioaddr + StationAddr + i);
 938	eth_hw_addr_set(dev, addr);
 939
 940	if (!is_valid_ether_addr(dev->dev_addr)) {
 941		/* Report it and use a random ethernet address instead */
 942		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
 943		eth_hw_addr_random(dev);
 944		netdev_info(dev, "Using random MAC address: %pM\n",
 945			    dev->dev_addr);
 946	}
 947
 948	/* For Rhine-I/II, phy_id is loaded from EEPROM */
 949	if (!phy_id)
 950		phy_id = ioread8(ioaddr + 0x6C);
 951
 952	spin_lock_init(&rp->lock);
 953	mutex_init(&rp->task_lock);
 954	INIT_WORK(&rp->reset_task, rhine_reset_task);
 955	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
 956
 957	rp->mii_if.dev = dev;
 958	rp->mii_if.mdio_read = mdio_read;
 959	rp->mii_if.mdio_write = mdio_write;
 960	rp->mii_if.phy_id_mask = 0x1f;
 961	rp->mii_if.reg_num_mask = 0x1f;
 962
 963	/* The chip-specific entries in the device structure. */
 964	dev->netdev_ops = &rhine_netdev_ops;
 965	dev->ethtool_ops = &netdev_ethtool_ops;
 966	dev->watchdog_timeo = TX_TIMEOUT;
 967
 968	netif_napi_add(dev, &rp->napi, rhine_napipoll);
 969
 970	if (rp->quirks & rqRhineI)
 971		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 972
 973	if (rp->quirks & rqMgmt)
 974		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 975				 NETIF_F_HW_VLAN_CTAG_RX |
 976				 NETIF_F_HW_VLAN_CTAG_FILTER;
 977
 978	/* dev->name not defined before register_netdev()! */
 979	rc = register_netdev(dev);
 980	if (rc)
 981		goto err_out_free_netdev;
 982
 983	if (rp->quirks & rqRhineI)
 984		name = "Rhine";
 985	else if (rp->quirks & rqStatusWBRace)
 986		name = "Rhine II";
 987	else if (rp->quirks & rqMgmt)
 988		name = "Rhine III (Management Adapter)";
 989	else
 990		name = "Rhine III";
 991
 992	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
 993		    name, ioaddr, dev->dev_addr, rp->irq);
 994
 995	dev_set_drvdata(hwdev, dev);
 996
 997	{
 998		u16 mii_cmd;
 999		int mii_status = mdio_read(dev, phy_id, 1);
1000		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1001		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1002		if (mii_status != 0xffff && mii_status != 0x0000) {
1003			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1004			netdev_info(dev,
1005				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1006				    phy_id,
1007				    mii_status, rp->mii_if.advertising,
1008				    mdio_read(dev, phy_id, 5));
1009
1010			/* set IFF_RUNNING */
1011			if (mii_status & BMSR_LSTATUS)
1012				netif_carrier_on(dev);
1013			else
1014				netif_carrier_off(dev);
1015
1016		}
1017	}
1018	rp->mii_if.phy_id = phy_id;
1019	if (avoid_D3)
1020		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1021
1022	return 0;
1023
1024err_out_free_netdev:
1025	free_netdev(dev);
1026err_out:
1027	return rc;
1028}
1029
1030static int rhine_init_one_pci(struct pci_dev *pdev,
1031			      const struct pci_device_id *ent)
1032{
1033	struct device *hwdev = &pdev->dev;
1034	int rc;
1035	long pioaddr, memaddr;
1036	void __iomem *ioaddr;
1037	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1038
1039/* This driver was written to use PCI memory space. Some early versions
1040 * of the Rhine may only work correctly with I/O space accesses.
1041 * TODO: determine for which revisions this is true and assign the flag
1042 *	 in code as opposed to this Kconfig option (???)
1043 */
1044#ifdef CONFIG_VIA_RHINE_MMIO
1045	u32 quirks = rqNeedEnMMIO;
1046#else
1047	u32 quirks = 0;
1048#endif
1049
1050	rc = pci_enable_device(pdev);
1051	if (rc)
1052		goto err_out;
1053
1054	if (pdev->revision < VTunknown0) {
1055		quirks |= rqRhineI;
1056	} else if (pdev->revision >= VT6102) {
1057		quirks |= rqWOL | rqForceReset;
1058		if (pdev->revision < VT6105) {
1059			quirks |= rqStatusWBRace;
1060		} else {
1061			quirks |= rqIntPHY;
1062			if (pdev->revision >= VT6105_B0)
1063				quirks |= rq6patterns;
1064			if (pdev->revision >= VT6105M)
1065				quirks |= rqMgmt;
1066		}
1067	}
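	/*
	 * Worked example: a VT6105M (revision 0x90) takes the else branch
	 * above and ends up with rqWOL | rqForceReset | rqIntPHY |
	 * rq6patterns | rqMgmt -- plus rqNeedEnMMIO if CONFIG_VIA_RHINE_MMIO
	 * is set.
	 */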
1068
1069	/* sanity check */
1070	if ((pci_resource_len(pdev, 0) < io_size) ||
1071	    (pci_resource_len(pdev, 1) < io_size)) {
1072		rc = -EIO;
1073		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1074		goto err_out_pci_disable;
1075	}
1076
1077	pioaddr = pci_resource_start(pdev, 0);
1078	memaddr = pci_resource_start(pdev, 1);
1079
1080	pci_set_master(pdev);
1081
1082	rc = pci_request_regions(pdev, DRV_NAME);
1083	if (rc)
1084		goto err_out_pci_disable;
1085
1086	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1087	if (!ioaddr) {
1088		rc = -EIO;
1089		dev_err(hwdev,
1090			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1091			dev_name(hwdev), io_size, memaddr);
1092		goto err_out_free_res;
1093	}
1094
1095	enable_mmio(pioaddr, quirks);
1096
1097	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1098	if (rc)
1099		goto err_out_unmap;
1100
1101	rc = rhine_init_one_common(&pdev->dev, quirks,
1102				   pioaddr, ioaddr, pdev->irq);
1103	if (!rc)
1104		return 0;
1105
1106err_out_unmap:
1107	pci_iounmap(pdev, ioaddr);
1108err_out_free_res:
1109	pci_release_regions(pdev);
1110err_out_pci_disable:
1111	pci_disable_device(pdev);
1112err_out:
1113	return rc;
1114}
1115
1116static int rhine_init_one_platform(struct platform_device *pdev)
1117{
1118	const u32 *quirks;
1119	int irq;
1120	void __iomem *ioaddr;
1121
1122	quirks = of_device_get_match_data(&pdev->dev);
1123	if (!quirks)
1124		return -EINVAL;
1125
1126	ioaddr = devm_platform_ioremap_resource(pdev, 0);
1127	if (IS_ERR(ioaddr))
1128		return PTR_ERR(ioaddr);
1129
1130	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1131	if (!irq)
1132		return -EINVAL;
1133
1134	return rhine_init_one_common(&pdev->dev, *quirks,
1135				     (long)ioaddr, ioaddr, irq);
1136}
1137
1138static int alloc_ring(struct net_device* dev)
1139{
1140	struct rhine_private *rp = netdev_priv(dev);
1141	struct device *hwdev = dev->dev.parent;
1142	void *ring;
1143	dma_addr_t ring_dma;
1144
1145	ring = dma_alloc_coherent(hwdev,
1146				  RX_RING_SIZE * sizeof(struct rx_desc) +
1147				  TX_RING_SIZE * sizeof(struct tx_desc),
1148				  &ring_dma,
1149				  GFP_ATOMIC);
1150	if (!ring) {
1151		netdev_err(dev, "Could not allocate DMA memory\n");
1152		return -ENOMEM;
1153	}
1154	if (rp->quirks & rqRhineI) {
1155		rp->tx_bufs = dma_alloc_coherent(hwdev,
1156						 PKT_BUF_SZ * TX_RING_SIZE,
1157						 &rp->tx_bufs_dma,
1158						 GFP_ATOMIC);
1159		if (rp->tx_bufs == NULL) {
1160			dma_free_coherent(hwdev,
1161					  RX_RING_SIZE * sizeof(struct rx_desc) +
1162					  TX_RING_SIZE * sizeof(struct tx_desc),
1163					  ring, ring_dma);
1164			return -ENOMEM;
1165		}
1166	}
1167
1168	rp->rx_ring = ring;
1169	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1170	rp->rx_ring_dma = ring_dma;
1171	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1172
1173	return 0;
1174}
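/*
 * Layout of the single coherent allocation made above:
 *
 *	[ RX_RING_SIZE * struct rx_desc | TX_RING_SIZE * struct tx_desc ]
 *
 * rx_ring/rx_ring_dma point at the start, tx_ring/tx_ring_dma just past
 * the Rx descriptors.
 */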
1175
1176static void free_ring(struct net_device* dev)
1177{
1178	struct rhine_private *rp = netdev_priv(dev);
1179	struct device *hwdev = dev->dev.parent;
1180
1181	dma_free_coherent(hwdev,
1182			  RX_RING_SIZE * sizeof(struct rx_desc) +
1183			  TX_RING_SIZE * sizeof(struct tx_desc),
1184			  rp->rx_ring, rp->rx_ring_dma);
1185	rp->tx_ring = NULL;
1186
1187	if (rp->tx_bufs)
1188		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1189				  rp->tx_bufs, rp->tx_bufs_dma);
1190
1191	rp->tx_bufs = NULL;
1192
1193}
1194
1195struct rhine_skb_dma {
1196	struct sk_buff *skb;
1197	dma_addr_t dma;
1198};
1199
1200static inline int rhine_skb_dma_init(struct net_device *dev,
1201				     struct rhine_skb_dma *sd)
1202{
1203	struct rhine_private *rp = netdev_priv(dev);
1204	struct device *hwdev = dev->dev.parent;
1205	const int size = rp->rx_buf_sz;
1206
1207	sd->skb = netdev_alloc_skb(dev, size);
1208	if (!sd->skb)
1209		return -ENOMEM;
1210
1211	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1212	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1213		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1214		dev_kfree_skb_any(sd->skb);
1215		return -EIO;
1216	}
1217
1218	return 0;
1219}
1220
1221static void rhine_reset_rbufs(struct rhine_private *rp)
1222{
1223	int i;
1224
1225	rp->cur_rx = 0;
1226
1227	for (i = 0; i < RX_RING_SIZE; i++)
1228		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1229}
1230
1231static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1232					   struct rhine_skb_dma *sd, int entry)
1233{
1234	rp->rx_skbuff_dma[entry] = sd->dma;
1235	rp->rx_skbuff[entry] = sd->skb;
1236
1237	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1238	dma_wmb();
1239}
1240
1241static void free_rbufs(struct net_device* dev);
1242
1243static int alloc_rbufs(struct net_device *dev)
1244{
1245	struct rhine_private *rp = netdev_priv(dev);
1246	dma_addr_t next;
1247	int rc, i;
1248
1249	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1250	next = rp->rx_ring_dma;
1251
1252	/* Init the ring entries */
1253	for (i = 0; i < RX_RING_SIZE; i++) {
1254		rp->rx_ring[i].rx_status = 0;
1255		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1256		next += sizeof(struct rx_desc);
1257		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1258		rp->rx_skbuff[i] = NULL;
1259	}
1260	/* Mark the last entry as wrapping the ring. */
1261	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1262
1263	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1264	for (i = 0; i < RX_RING_SIZE; i++) {
1265		struct rhine_skb_dma sd;
1266
1267		rc = rhine_skb_dma_init(dev, &sd);
1268		if (rc < 0) {
1269			free_rbufs(dev);
1270			goto out;
1271		}
1272
1273		rhine_skb_dma_nic_store(rp, &sd, i);
1274	}
1275
1276	rhine_reset_rbufs(rp);
1277out:
1278	return rc;
1279}
1280
1281static void free_rbufs(struct net_device* dev)
1282{
1283	struct rhine_private *rp = netdev_priv(dev);
1284	struct device *hwdev = dev->dev.parent;
1285	int i;
1286
1287	/* Free all the skbuffs in the Rx queue. */
1288	for (i = 0; i < RX_RING_SIZE; i++) {
1289		rp->rx_ring[i].rx_status = 0;
1290		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1291		if (rp->rx_skbuff[i]) {
1292			dma_unmap_single(hwdev,
1293					 rp->rx_skbuff_dma[i],
1294					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1295			dev_kfree_skb(rp->rx_skbuff[i]);
1296		}
1297		rp->rx_skbuff[i] = NULL;
1298	}
1299}
1300
1301static void alloc_tbufs(struct net_device* dev)
1302{
1303	struct rhine_private *rp = netdev_priv(dev);
1304	dma_addr_t next;
1305	int i;
1306
1307	rp->dirty_tx = rp->cur_tx = 0;
1308	next = rp->tx_ring_dma;
1309	for (i = 0; i < TX_RING_SIZE; i++) {
1310		rp->tx_skbuff[i] = NULL;
1311		rp->tx_ring[i].tx_status = 0;
1312		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1313		next += sizeof(struct tx_desc);
1314		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1315		if (rp->quirks & rqRhineI)
1316			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1317	}
1318	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1319
1320	netdev_reset_queue(dev);
1321}
1322
1323static void free_tbufs(struct net_device* dev)
1324{
1325	struct rhine_private *rp = netdev_priv(dev);
1326	struct device *hwdev = dev->dev.parent;
1327	int i;
1328
1329	for (i = 0; i < TX_RING_SIZE; i++) {
1330		rp->tx_ring[i].tx_status = 0;
1331		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1332		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1333		if (rp->tx_skbuff[i]) {
1334			if (rp->tx_skbuff_dma[i]) {
1335				dma_unmap_single(hwdev,
1336						 rp->tx_skbuff_dma[i],
1337						 rp->tx_skbuff[i]->len,
1338						 DMA_TO_DEVICE);
1339			}
1340			dev_kfree_skb(rp->tx_skbuff[i]);
1341		}
1342		rp->tx_skbuff[i] = NULL;
1343		rp->tx_buf[i] = NULL;
1344	}
1345}
1346
1347static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1348{
1349	struct rhine_private *rp = netdev_priv(dev);
1350	void __iomem *ioaddr = rp->base;
1351
1352	if (!rp->mii_if.force_media)
1353		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1354
 1355	if (rp->mii_if.full_duplex)
 1356		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
 1357			 ioaddr + ChipCmd1);
 1358	else
 1359		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
 1360			 ioaddr + ChipCmd1);
1361
1362	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1363		   rp->mii_if.force_media, netif_carrier_ok(dev));
1364}
1365
1366/* Called after status of force_media possibly changed */
1367static void rhine_set_carrier(struct mii_if_info *mii)
1368{
1369	struct net_device *dev = mii->dev;
1370	struct rhine_private *rp = netdev_priv(dev);
1371
1372	if (mii->force_media) {
1373		/* autoneg is off: Link is always assumed to be up */
1374		if (!netif_carrier_ok(dev))
1375			netif_carrier_on(dev);
1376	}
1377
1378	rhine_check_media(dev, 0);
1379
1380	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1381		   mii->force_media, netif_carrier_ok(dev));
1382}
1383
1384/**
1385 * rhine_set_cam - set CAM multicast filters
1386 * @ioaddr: register block of this Rhine
1387 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1388 * @addr: multicast address (6 bytes)
1389 *
1390 * Load addresses into multicast filters.
1391 */
1392static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1393{
1394	int i;
1395
1396	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1397	wmb();
1398
1399	/* Paranoid -- idx out of range should never happen */
1400	idx &= (MCAM_SIZE - 1);
1401
1402	iowrite8((u8) idx, ioaddr + CamAddr);
1403
1404	for (i = 0; i < 6; i++, addr++)
1405		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1406	udelay(10);
1407	wmb();
1408
1409	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1410	udelay(10);
1411
1412	iowrite8(0, ioaddr + CamCon);
1413}
1414
1415/**
1416 * rhine_set_vlan_cam - set CAM VLAN filters
1417 * @ioaddr: register block of this Rhine
1418 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1419 * @addr: VLAN ID (2 bytes)
1420 *
1421 * Load addresses into VLAN filters.
1422 */
1423static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1424{
1425	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1426	wmb();
1427
1428	/* Paranoid -- idx out of range should never happen */
1429	idx &= (VCAM_SIZE - 1);
1430
1431	iowrite8((u8) idx, ioaddr + CamAddr);
1432
1433	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1434	udelay(10);
1435	wmb();
1436
1437	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1438	udelay(10);
1439
1440	iowrite8(0, ioaddr + CamCon);
1441}
1442
1443/**
1444 * rhine_set_cam_mask - set multicast CAM mask
1445 * @ioaddr: register block of this Rhine
1446 * @mask: multicast CAM mask
1447 *
1448 * Mask sets multicast filters active/inactive.
1449 */
1450static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1451{
1452	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1453	wmb();
1454
1455	/* write mask */
1456	iowrite32(mask, ioaddr + CamMask);
1457
1458	/* disable CAMEN */
1459	iowrite8(0, ioaddr + CamCon);
1460}
1461
1462/**
1463 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1464 * @ioaddr: register block of this Rhine
1465 * @mask: VLAN CAM mask
1466 *
1467 * Mask sets VLAN filters active/inactive.
1468 */
1469static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1470{
1471	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1472	wmb();
1473
1474	/* write mask */
1475	iowrite32(mask, ioaddr + CamMask);
1476
1477	/* disable CAMEN */
1478	iowrite8(0, ioaddr + CamCon);
1479}
1480
1481/**
1482 * rhine_init_cam_filter - initialize CAM filters
1483 * @dev: network device
1484 *
1485 * Initialize (disable) hardware VLAN and multicast support on this
1486 * Rhine.
1487 */
1488static void rhine_init_cam_filter(struct net_device *dev)
1489{
1490	struct rhine_private *rp = netdev_priv(dev);
1491	void __iomem *ioaddr = rp->base;
1492
1493	/* Disable all CAMs */
1494	rhine_set_vlan_cam_mask(ioaddr, 0);
1495	rhine_set_cam_mask(ioaddr, 0);
1496
1497	/* disable hardware VLAN support */
1498	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1499	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1500}
1501
1502/**
1503 * rhine_update_vcam - update VLAN CAM filters
 1504 * @dev: network device
1505 *
1506 * Update VLAN CAM filters to match configuration change.
1507 */
1508static void rhine_update_vcam(struct net_device *dev)
1509{
1510	struct rhine_private *rp = netdev_priv(dev);
1511	void __iomem *ioaddr = rp->base;
1512	u16 vid;
1513	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1514	unsigned int i = 0;
1515
1516	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1517		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1518		vCAMmask |= 1 << i;
1519		if (++i >= VCAM_SIZE)
1520			break;
1521	}
1522	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1523}
1524
1525static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1526{
1527	struct rhine_private *rp = netdev_priv(dev);
1528
1529	spin_lock_bh(&rp->lock);
1530	set_bit(vid, rp->active_vlans);
1531	rhine_update_vcam(dev);
1532	spin_unlock_bh(&rp->lock);
1533	return 0;
1534}
1535
1536static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1537{
1538	struct rhine_private *rp = netdev_priv(dev);
1539
1540	spin_lock_bh(&rp->lock);
1541	clear_bit(vid, rp->active_vlans);
1542	rhine_update_vcam(dev);
1543	spin_unlock_bh(&rp->lock);
1544	return 0;
1545}
1546
1547static void init_registers(struct net_device *dev)
1548{
1549	struct rhine_private *rp = netdev_priv(dev);
1550	void __iomem *ioaddr = rp->base;
1551	int i;
1552
1553	for (i = 0; i < 6; i++)
1554		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1555
1556	/* Initialize other registers. */
1557	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1558	/* Configure initial FIFO thresholds. */
1559	iowrite8(0x20, ioaddr + TxConfig);
1560	rp->tx_thresh = 0x20;
1561	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1562
1563	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1564	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1565
1566	rhine_set_rx_mode(dev);
1567
1568	if (rp->quirks & rqMgmt)
1569		rhine_init_cam_filter(dev);
1570
1571	napi_enable(&rp->napi);
1572
1573	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1574
1575	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1576	       ioaddr + ChipCmd);
1577	rhine_check_media(dev, 1);
1578}
1579
1580/* Enable MII link status auto-polling (required for IntrLinkChange) */
1581static void rhine_enable_linkmon(struct rhine_private *rp)
1582{
1583	void __iomem *ioaddr = rp->base;
1584
1585	iowrite8(0, ioaddr + MIICmd);
1586	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1587	iowrite8(0x80, ioaddr + MIICmd);
1588
1589	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1590
1591	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1592}
1593
1594/* Disable MII link status auto-polling (required for MDIO access) */
1595static void rhine_disable_linkmon(struct rhine_private *rp)
1596{
1597	void __iomem *ioaddr = rp->base;
1598
1599	iowrite8(0, ioaddr + MIICmd);
1600
1601	if (rp->quirks & rqRhineI) {
1602		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1603
1604		/* Can be called from ISR. Evil. */
1605		mdelay(1);
1606
1607		/* 0x80 must be set immediately before turning it off */
1608		iowrite8(0x80, ioaddr + MIICmd);
1609
1610		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1611
1612		/* Heh. Now clear 0x80 again. */
1613		iowrite8(0, ioaddr + MIICmd);
1614	}
1615	else
1616		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1617}
1618
1619/* Read and write over the MII Management Data I/O (MDIO) interface. */
1620
1621static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1622{
1623	struct rhine_private *rp = netdev_priv(dev);
1624	void __iomem *ioaddr = rp->base;
1625	int result;
1626
1627	rhine_disable_linkmon(rp);
1628
1629	/* rhine_disable_linkmon already cleared MIICmd */
1630	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1631	iowrite8(regnum, ioaddr + MIIRegAddr);
1632	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1633	rhine_wait_bit_low(rp, MIICmd, 0x40);
1634	result = ioread16(ioaddr + MIIData);
1635
1636	rhine_enable_linkmon(rp);
1637	return result;
1638}
1639
1640static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1641{
1642	struct rhine_private *rp = netdev_priv(dev);
1643	void __iomem *ioaddr = rp->base;
1644
1645	rhine_disable_linkmon(rp);
1646
1647	/* rhine_disable_linkmon already cleared MIICmd */
1648	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1649	iowrite8(regnum, ioaddr + MIIRegAddr);
1650	iowrite16(value, ioaddr + MIIData);
1651	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1652	rhine_wait_bit_low(rp, MIICmd, 0x20);
1653
1654	rhine_enable_linkmon(rp);
1655}
1656
1657static void rhine_task_disable(struct rhine_private *rp)
1658{
1659	mutex_lock(&rp->task_lock);
1660	rp->task_enable = false;
1661	mutex_unlock(&rp->task_lock);
1662
1663	cancel_work_sync(&rp->slow_event_task);
1664	cancel_work_sync(&rp->reset_task);
1665}
1666
1667static void rhine_task_enable(struct rhine_private *rp)
1668{
1669	mutex_lock(&rp->task_lock);
1670	rp->task_enable = true;
1671	mutex_unlock(&rp->task_lock);
1672}
1673
1674static int rhine_open(struct net_device *dev)
1675{
1676	struct rhine_private *rp = netdev_priv(dev);
1677	void __iomem *ioaddr = rp->base;
1678	int rc;
1679
1680	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1681	if (rc)
1682		goto out;
1683
1684	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1685
1686	rc = alloc_ring(dev);
1687	if (rc < 0)
1688		goto out_free_irq;
1689
1690	rc = alloc_rbufs(dev);
1691	if (rc < 0)
1692		goto out_free_ring;
1693
1694	alloc_tbufs(dev);
1695	enable_mmio(rp->pioaddr, rp->quirks);
1696	rhine_power_init(dev);
1697	rhine_chip_reset(dev);
1698	rhine_task_enable(rp);
1699	init_registers(dev);
1700
1701	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1702		  __func__, ioread16(ioaddr + ChipCmd),
1703		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1704
1705	netif_start_queue(dev);
1706
1707out:
1708	return rc;
1709
1710out_free_ring:
1711	free_ring(dev);
1712out_free_irq:
1713	free_irq(rp->irq, dev);
1714	goto out;
1715}
1716
1717static void rhine_reset_task(struct work_struct *work)
1718{
1719	struct rhine_private *rp = container_of(work, struct rhine_private,
1720						reset_task);
1721	struct net_device *dev = rp->dev;
1722
1723	mutex_lock(&rp->task_lock);
1724
1725	if (!rp->task_enable)
1726		goto out_unlock;
1727
1728	napi_disable(&rp->napi);
1729	netif_tx_disable(dev);
1730	spin_lock_bh(&rp->lock);
1731
1732	/* clear all descriptors */
1733	free_tbufs(dev);
1734	alloc_tbufs(dev);
1735
1736	rhine_reset_rbufs(rp);
1737
1738	/* Reinitialize the hardware. */
1739	rhine_chip_reset(dev);
1740	init_registers(dev);
1741
1742	spin_unlock_bh(&rp->lock);
1743
1744	netif_trans_update(dev); /* prevent tx timeout */
1745	dev->stats.tx_errors++;
1746	netif_wake_queue(dev);
1747
1748out_unlock:
1749	mutex_unlock(&rp->task_lock);
1750}
1751
1752static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1753{
1754	struct rhine_private *rp = netdev_priv(dev);
1755	void __iomem *ioaddr = rp->base;
1756
1757	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1758		    ioread16(ioaddr + IntrStatus),
1759		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1760
1761	schedule_work(&rp->reset_task);
1762}
1763
1764static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1765{
1766	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1767}
1768
1769static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1770				  struct net_device *dev)
1771{
1772	struct rhine_private *rp = netdev_priv(dev);
1773	struct device *hwdev = dev->dev.parent;
1774	void __iomem *ioaddr = rp->base;
1775	unsigned entry;
1776
1777	/* Caution: the write order is important here, set the field
1778	   with the "ownership" bits last. */
1779
1780	/* Calculate the next Tx descriptor entry. */
1781	entry = rp->cur_tx % TX_RING_SIZE;
1782
1783	if (skb_padto(skb, ETH_ZLEN))
1784		return NETDEV_TX_OK;
1785
1786	rp->tx_skbuff[entry] = skb;
1787
1788	if ((rp->quirks & rqRhineI) &&
1789	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1790		/* Must use alignment buffer. */
1791		if (skb->len > PKT_BUF_SZ) {
1792			/* packet too long, drop it */
1793			dev_kfree_skb_any(skb);
1794			rp->tx_skbuff[entry] = NULL;
1795			dev->stats.tx_dropped++;
1796			return NETDEV_TX_OK;
1797		}
1798
1799		/* Padding is not copied and so must be redone. */
1800		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1801		if (skb->len < ETH_ZLEN)
1802			memset(rp->tx_buf[entry] + skb->len, 0,
1803			       ETH_ZLEN - skb->len);
1804		rp->tx_skbuff_dma[entry] = 0;
1805		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1806						      (rp->tx_buf[entry] -
1807						       rp->tx_bufs));
1808	} else {
1809		rp->tx_skbuff_dma[entry] =
1810			dma_map_single(hwdev, skb->data, skb->len,
1811				       DMA_TO_DEVICE);
1812		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1813			dev_kfree_skb_any(skb);
1814			rp->tx_skbuff_dma[entry] = 0;
1815			dev->stats.tx_dropped++;
1816			return NETDEV_TX_OK;
1817		}
1818		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1819	}
1820
1821	rp->tx_ring[entry].desc_length =
1822		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1823
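	/*
	 * The 802.1Q TCI carries PCP in bits 15-13, CFI/DEI in bit 12 and
	 * VID in bits 11-0; the chip wants PCP in bits 14-12, directly
	 * above the VID. E.g. TCI 0xb123 (PCP 5, DEI 1, VID 0x123)
	 * becomes 0x5123.
	 */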
1824	if (unlikely(skb_vlan_tag_present(skb))) {
1825		u16 vid_pcp = skb_vlan_tag_get(skb);
1826
1827		/* drop CFI/DEI bit, register needs VID and PCP */
1828		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1829			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1830		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1831		/* request tagging */
1832		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1833	} else {
1834		rp->tx_ring[entry].tx_status = 0;
1835	}
1836
1837	netdev_sent_queue(dev, skb->len);
1838	/* Publish all descriptor fields before the DescOwn handover below. */
1839	dma_wmb();
1840	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1841	wmb();
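	/*
	 * wmb() orders the DescOwn store above before the Cmd1TxDemand
	 * doorbell and TQWake writes below, so the NIC is not kicked
	 * before the new descriptor is visible to it.
	 */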
1842
1843	rp->cur_tx++;
1844	/*
1845	 * Do not let the cur_tx update linger unpublished once the NIC
1846	 * has seen the transmit request; otherwise the transmit
1847	 * completion handler could miss it.
1848	 */
1849	smp_wmb();
1850
1851	/* Non-x86 Todo: explicitly flush cache lines here. */
1852
1853	if (skb_vlan_tag_present(skb))
1854		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1855		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1856
1857	/* Wake the potentially-idle transmit channel */
1858	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1859	       ioaddr + ChipCmd1);
1860	IOSYNC;
1861
1862	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
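	/*
	 * Stop first, then re-check after the smp_rmb(): either this CPU
	 * observes the latest dirty_tx and wakes the queue itself, or the
	 * completion path sees the stopped queue and wakes it in rhine_tx().
	 */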
1863	if (rhine_tx_queue_full(rp)) {
1864		netif_stop_queue(dev);
1865		smp_rmb();
1866		/* Rejuvenate. */
1867		if (!rhine_tx_queue_full(rp))
1868			netif_wake_queue(dev);
1869	}
1870
1871	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1872		  rp->cur_tx - 1, entry);
1873
1874	return NETDEV_TX_OK;
1875}
1876
1877static void rhine_irq_disable(struct rhine_private *rp)
1878{
1879	iowrite16(0x0000, rp->base + IntrEnable);
1880}
1881
1882/* The interrupt handler does all of the Rx thread work and cleans up
1883   after the Tx thread. */
1884static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1885{
1886	struct net_device *dev = dev_instance;
1887	struct rhine_private *rp = netdev_priv(dev);
1888	u32 status;
1889	int handled = 0;
1890
1891	status = rhine_get_events(rp);
1892
1893	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1894
1895	if (status & RHINE_EVENT) {
1896		handled = 1;
1897
1898		rhine_irq_disable(rp);
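		/*
		 * Interrupts stay masked until the NAPI poll has done its
		 * budgeted work and re-enables them (see rhine_napipoll()).
		 */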
1899		napi_schedule(&rp->napi);
1900	}
1901
1902	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1903		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1904			  status);
1905	}
1906
1907	return IRQ_RETVAL(handled);
1908}
1909
1910/* This routine is logically part of the interrupt handler, but isolated
1911   for clarity. */
1912static void rhine_tx(struct net_device *dev)
1913{
1914	struct rhine_private *rp = netdev_priv(dev);
1915	struct device *hwdev = dev->dev.parent;
1916	unsigned int pkts_compl = 0, bytes_compl = 0;
1917	unsigned int dirty_tx = rp->dirty_tx;
1918	unsigned int cur_tx;
1919	struct sk_buff *skb;
1920
1921	/*
1922	 * The race with rhine_start_tx does not matter here as long as the
1923	 * driver enforces a value of cur_tx that was relevant when the
1924	 * packet was scheduled to the network chipset.
1925	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
1926	 */
1927	smp_rmb();
1928	cur_tx = rp->cur_tx;
1929	/* find and cleanup dirty tx descriptors */
1930	while (dirty_tx != cur_tx) {
1931		unsigned int entry = dirty_tx % TX_RING_SIZE;
1932		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1933
1934		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1935			  entry, txstatus);
1936		if (txstatus & DescOwn)
1937			break;
1938		skb = rp->tx_skbuff[entry];
1939		if (txstatus & 0x8000) {
1940			netif_dbg(rp, tx_done, dev,
1941				  "Transmit error, Tx status %08x\n", txstatus);
1942			dev->stats.tx_errors++;
1943			if (txstatus & 0x0400)
1944				dev->stats.tx_carrier_errors++;
1945			if (txstatus & 0x0200)
1946				dev->stats.tx_window_errors++;
1947			if (txstatus & 0x0100)
1948				dev->stats.tx_aborted_errors++;
1949			if (txstatus & 0x0080)
1950				dev->stats.tx_heartbeat_errors++;
1951			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1952			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1953				dev->stats.tx_fifo_errors++;
1954				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1955				break; /* Keep the skb - we try again */
1956			}
1957			/* Transmitter restarted in 'abnormal' handler. */
1958		} else {
1959			if (rp->quirks & rqRhineI)
1960				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1961			else
1962				dev->stats.collisions += txstatus & 0x0F;
1963			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1964				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1965
1966			u64_stats_update_begin(&rp->tx_stats.syncp);
1967			rp->tx_stats.bytes += skb->len;
1968			rp->tx_stats.packets++;
1969			u64_stats_update_end(&rp->tx_stats.syncp);
1970		}
1971		/* Free the original skb. */
1972		if (rp->tx_skbuff_dma[entry]) {
1973			dma_unmap_single(hwdev,
1974					 rp->tx_skbuff_dma[entry],
1975					 skb->len,
1976					 DMA_TO_DEVICE);
1977		}
1978		bytes_compl += skb->len;
1979		pkts_compl++;
1980		dev_consume_skb_any(skb);
1981		rp->tx_skbuff[entry] = NULL;
1982		dirty_tx++;
1983	}
1984
1985	rp->dirty_tx = dirty_tx;
1986	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
1987	smp_wmb();
1988
1989	netdev_completed_queue(dev, pkts_compl, bytes_compl);
1990
1991	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
1992	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
1993		netif_wake_queue(dev);
1994		smp_rmb();
1995		/* Rejuvenate. */
1996		if (rhine_tx_queue_full(rp))
1997			netif_stop_queue(dev);
1998	}
1999}
2000
2001/**
2002 * rhine_get_vlan_tci - extract TCI from Rx data buffer
2003 * @skb: pointer to sk_buff
2004 * @data_size: used data area of the buffer including CRC
2005 *
2006 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2007 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2008 * aligned following the CRC.
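 *
 * Example: for data_size = 64 (60 data bytes + 4 bytes CRC) the TCI is
 * read at offset ((64 + 3) & ~3) + 2 = 66, i.e. just past the 2-byte
 * TPID that follows the 4-byte-aligned end of the frame.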
2009 */
2010static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2011{
2012	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2013	return be16_to_cpup((__be16 *)trailer);
2014}
2015
2016static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2017				     int data_size)
2018{
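	/*
	 * Pairs with the descriptor write-back from the NIC: desc_length
	 * and the tag trailer must not be read before the DescOwn check
	 * that brought us here.
	 */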
2019	dma_rmb();
2020	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2021		u16 vlan_tci;
2022
2023		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2024		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2025	}
2026}
2027
2028/* Process up to limit frames from receive ring */
2029static int rhine_rx(struct net_device *dev, int limit)
2030{
2031	struct rhine_private *rp = netdev_priv(dev);
2032	struct device *hwdev = dev->dev.parent;
2033	int entry = rp->cur_rx % RX_RING_SIZE;
2034	int count;
2035
2036	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2037		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2038
2039	/* If EOP is set on the next entry, it's a new packet. Send it up. */
2040	for (count = 0; count < limit; ++count) {
2041		struct rx_desc *desc = rp->rx_ring + entry;
2042		u32 desc_status = le32_to_cpu(desc->rx_status);
2043		int data_size = desc_status >> 16;
2044
2045		if (desc_status & DescOwn)
2046			break;
2047
2048		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2049			  desc_status);
2050
2051		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2052			if ((desc_status & RxWholePkt) != RxWholePkt) {
2053				netdev_warn(dev,
2054					    "Oversized Ethernet frame spanned multiple buffers, "
2055					    "entry %#x length %d status %08x!\n",
2056					    entry, data_size,
2057					    desc_status);
2058				dev->stats.rx_length_errors++;
2059			} else if (desc_status & RxErr) {
2060				/* There was an error. */
2061				netif_dbg(rp, rx_err, dev,
2062					  "%s() Rx error %08x\n", __func__,
2063					  desc_status);
2064				dev->stats.rx_errors++;
2065				if (desc_status & 0x0030)
2066					dev->stats.rx_length_errors++;
2067				if (desc_status & 0x0048)
2068					dev->stats.rx_fifo_errors++;
2069				if (desc_status & 0x0004)
2070					dev->stats.rx_frame_errors++;
2071				if (desc_status & 0x0002) {
2072					/* this can also be updated outside the interrupt handler */
2073					spin_lock(&rp->lock);
2074					dev->stats.rx_crc_errors++;
2075					spin_unlock(&rp->lock);
2076				}
2077			}
2078		} else {
2079			/* Length should omit the CRC */
2080			int pkt_len = data_size - 4;
2081			struct sk_buff *skb;
2082
2083			/* Check if the packet is long enough to accept without
2084			   copying to a minimally-sized skbuff. */
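			/*
			 * Short frames: copy out of the still-mapped ring
			 * buffer (sync to CPU, copy, sync back for the NIC)
			 * and leave it in place. Longer frames: pass the
			 * ring skb upstream and map a fresh replacement.
			 */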
2085			if (pkt_len < rx_copybreak) {
2086				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2087				if (unlikely(!skb))
2088					goto drop;
2089
2090				dma_sync_single_for_cpu(hwdev,
2091							rp->rx_skbuff_dma[entry],
2092							rp->rx_buf_sz,
2093							DMA_FROM_DEVICE);
2094
2095				skb_copy_to_linear_data(skb,
2096						 rp->rx_skbuff[entry]->data,
2097						 pkt_len);
2098
2099				dma_sync_single_for_device(hwdev,
2100							   rp->rx_skbuff_dma[entry],
2101							   rp->rx_buf_sz,
2102							   DMA_FROM_DEVICE);
2103			} else {
2104				struct rhine_skb_dma sd;
2105
2106				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2107					goto drop;
2108
2109				skb = rp->rx_skbuff[entry];
2110
2111				dma_unmap_single(hwdev,
2112						 rp->rx_skbuff_dma[entry],
2113						 rp->rx_buf_sz,
2114						 DMA_FROM_DEVICE);
2115				rhine_skb_dma_nic_store(rp, &sd, entry);
2116			}
2117
2118			skb_put(skb, pkt_len);
2119
2120			rhine_rx_vlan_tag(skb, desc, data_size);
2121
2122			skb->protocol = eth_type_trans(skb, dev);
2123
2124			netif_receive_skb(skb);
2125
2126			u64_stats_update_begin(&rp->rx_stats.syncp);
2127			rp->rx_stats.bytes += pkt_len;
2128			rp->rx_stats.packets++;
2129			u64_stats_update_end(&rp->rx_stats.syncp);
2130		}
2131give_descriptor_to_nic:
2132		desc->rx_status = cpu_to_le32(DescOwn);
2133		entry = (++rp->cur_rx) % RX_RING_SIZE;
2134	}
2135
2136	return count;
2137
2138drop:
2139	dev->stats.rx_dropped++;
2140	goto give_descriptor_to_nic;
2141}
2142
2143	static void rhine_restart_tx(struct net_device *dev)
	{
2144	struct rhine_private *rp = netdev_priv(dev);
2145	void __iomem *ioaddr = rp->base;
2146	int entry = rp->dirty_tx % TX_RING_SIZE;
2147	u32 intr_status;
2148
2149	/*
2150	 * If new errors occurred, we need to sort them out before doing Tx.
2151	 * In that case the ISR will be back here RSN anyway.
2152	 */
2153	intr_status = rhine_get_events(rp);
2154
2155	if ((intr_status & IntrTxErrSummary) == 0) {
2156
2157		/* We know better than the chip where it should continue. */
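		/*
		 * A tx_desc is four __le32 words (16 bytes), so this points
		 * TxRingPtr at the first descriptor not yet reaped.
		 */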
2158		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2159		       ioaddr + TxRingPtr);
2160
2161		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2162		       ioaddr + ChipCmd);
2163
2164		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2165			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2166			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2167
2168		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2169		       ioaddr + ChipCmd1);
2170		IOSYNC;
2171	} else {
2173		/* This should never happen */
2174		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2175			   intr_status);
2176	}
2178	}
2179
2180static void rhine_slow_event_task(struct work_struct *work)
2181{
2182	struct rhine_private *rp =
2183		container_of(work, struct rhine_private, slow_event_task);
2184	struct net_device *dev = rp->dev;
2185	u32 intr_status;
2186
2187	mutex_lock(&rp->task_lock);
2188
2189	if (!rp->task_enable)
2190		goto out_unlock;
2191
2192	intr_status = rhine_get_events(rp);
2193	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2194
2195	if (intr_status & IntrLinkChange)
2196		rhine_check_media(dev, 0);
2197
2198	if (intr_status & IntrPCIErr)
2199		netif_warn(rp, hw, dev, "PCI error\n");
2200
2201	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2202
2203out_unlock:
2204	mutex_unlock(&rp->task_lock);
2205}
2206
2207static void
2208rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2209{
2210	struct rhine_private *rp = netdev_priv(dev);
2211	unsigned int start;
2212
2213	spin_lock_bh(&rp->lock);
2214	rhine_update_rx_crc_and_missed_errord(rp);
2215	spin_unlock_bh(&rp->lock);
2216
2217	netdev_stats_to_stats64(stats, &dev->stats);
2218
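	/*
	 * Re-read under u64_stats retry so packet and byte counts stay
	 * consistent with each other even when the datapath updates them
	 * concurrently (matters on 32-bit SMP).
	 */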
2219	do {
2220		start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
2221		stats->rx_packets = rp->rx_stats.packets;
2222		stats->rx_bytes = rp->rx_stats.bytes;
2223	} while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));
2224
2225	do {
2226		start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
2227		stats->tx_packets = rp->tx_stats.packets;
2228		stats->tx_bytes = rp->tx_stats.bytes;
2229	} while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
2230}
2231
2232static void rhine_set_rx_mode(struct net_device *dev)
2233{
2234	struct rhine_private *rp = netdev_priv(dev);
2235	void __iomem *ioaddr = rp->base;
2236	u32 mc_filter[2];	/* Multicast hash filter */
2237	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2238	struct netdev_hw_addr *ha;
2239
2240	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2241		rx_mode = 0x1C;
2242		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2243		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2244	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2245		   (dev->flags & IFF_ALLMULTI)) {
2246		/* Too many to match, or accept all multicasts. */
2247		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2248		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2249	} else if (rp->quirks & rqMgmt) {
2250		int i = 0;
2251		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2252		netdev_for_each_mc_addr(ha, dev) {
2253			if (i == MCAM_SIZE)
2254				break;
2255			rhine_set_cam(ioaddr, i, ha->addr);
2256			mCAMmask |= 1 << i;
2257			i++;
2258		}
2259		rhine_set_cam_mask(ioaddr, mCAMmask);
2260	} else {
2261		memset(mc_filter, 0, sizeof(mc_filter));
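		/*
		 * 64-bin hash: the top six bits of the CRC pick a bin;
		 * bit 5 selects MulticastFilter0/1, bits 4-0 the bit
		 * within it (e.g. bin 37 sets bit 5 of MulticastFilter1).
		 */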
2262		netdev_for_each_mc_addr(ha, dev) {
2263			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2264
2265			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2266		}
2267		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2268		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2269	}
2270	/* enable/disable VLAN receive filtering */
2271	if (rp->quirks & rqMgmt) {
2272		if (dev->flags & IFF_PROMISC)
2273			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2274		else
2275			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2276	}
2277	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2278}
2279
2280	static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
2281{
2282	struct device *hwdev = dev->dev.parent;
2283
2284	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
2285	strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2286	}
2287
2288static int netdev_get_link_ksettings(struct net_device *dev,
2289				     struct ethtool_link_ksettings *cmd)
2290{
2291	struct rhine_private *rp = netdev_priv(dev);
2292
2293	mutex_lock(&rp->task_lock);
2294	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2295	mutex_unlock(&rp->task_lock);
2296
2297	return 0;
2298}
2299
2300static int netdev_set_link_ksettings(struct net_device *dev,
2301				     const struct ethtool_link_ksettings *cmd)
2302{
2303	struct rhine_private *rp = netdev_priv(dev);
2304	int rc;
2305
2306	mutex_lock(&rp->task_lock);
2307	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2308	rhine_set_carrier(&rp->mii_if);
2309	mutex_unlock(&rp->task_lock);
2310
2311	return rc;
2312}
2313
2314static int netdev_nway_reset(struct net_device *dev)
2315{
2316	struct rhine_private *rp = netdev_priv(dev);
2317
2318	return mii_nway_restart(&rp->mii_if);
2319}
2320
2321static u32 netdev_get_link(struct net_device *dev)
2322{
2323	struct rhine_private *rp = netdev_priv(dev);
2324
2325	return mii_link_ok(&rp->mii_if);
2326}
2327
2328static u32 netdev_get_msglevel(struct net_device *dev)
2329{
2330	struct rhine_private *rp = netdev_priv(dev);
2331
2332	return rp->msg_enable;
2333}
2334
2335static void netdev_set_msglevel(struct net_device *dev, u32 value)
2336{
2337	struct rhine_private *rp = netdev_priv(dev);
2338
2339	rp->msg_enable = value;
2340}
2341
2342static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2343{
2344	struct rhine_private *rp = netdev_priv(dev);
2345
2346	if (!(rp->quirks & rqWOL))
2347		return;
2348
2349	spin_lock_irq(&rp->lock);
2350	wol->supported = WAKE_PHY | WAKE_MAGIC |
2351			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2352	wol->wolopts = rp->wolopts;
2353	spin_unlock_irq(&rp->lock);
2354}
2355
2356static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2357{
2358	struct rhine_private *rp = netdev_priv(dev);
2359	u32 support = WAKE_PHY | WAKE_MAGIC |
2360		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2361
2362	if (!(rp->quirks & rqWOL))
2363		return -EINVAL;
2364
2365	if (wol->wolopts & ~support)
2366		return -EINVAL;
2367
2368	spin_lock_irq(&rp->lock);
2369	rp->wolopts = wol->wolopts;
2370	spin_unlock_irq(&rp->lock);
2371
2372	return 0;
2373}
2374
2375static const struct ethtool_ops netdev_ethtool_ops = {
2376	.get_drvinfo		= netdev_get_drvinfo,
2377	.nway_reset		= netdev_nway_reset,
2378	.get_link		= netdev_get_link,
2379	.get_msglevel		= netdev_get_msglevel,
2380	.set_msglevel		= netdev_set_msglevel,
2381	.get_wol		= rhine_get_wol,
2382	.set_wol		= rhine_set_wol,
2383	.get_link_ksettings	= netdev_get_link_ksettings,
2384	.set_link_ksettings	= netdev_set_link_ksettings,
2385};
2386
2387static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2388{
2389	struct rhine_private *rp = netdev_priv(dev);
2390	int rc;
2391
2392	if (!netif_running(dev))
2393		return -EINVAL;
2394
2395	mutex_lock(&rp->task_lock);
2396	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2397	rhine_set_carrier(&rp->mii_if);
2398	mutex_unlock(&rp->task_lock);
2399
2400	return rc;
2401}
2402
2403static int rhine_close(struct net_device *dev)
2404{
2405	struct rhine_private *rp = netdev_priv(dev);
2406	void __iomem *ioaddr = rp->base;
2407
2408	rhine_task_disable(rp);
2409	napi_disable(&rp->napi);
2410	netif_stop_queue(dev);
2411
2412	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2413		  ioread16(ioaddr + ChipCmd));
2414
2415	/* Switch to loopback mode to avoid hardware races. */
2416	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2417
2418	rhine_irq_disable(rp);
2419
2420	/* Stop the chip's Tx and Rx processes. */
2421	iowrite16(CmdStop, ioaddr + ChipCmd);
2422
2423	free_irq(rp->irq, dev);
2424	free_rbufs(dev);
2425	free_tbufs(dev);
2426	free_ring(dev);
2427
2428	return 0;
2429}
2430
2432static void rhine_remove_one_pci(struct pci_dev *pdev)
2433{
2434	struct net_device *dev = pci_get_drvdata(pdev);
2435	struct rhine_private *rp = netdev_priv(dev);
2436
2437	unregister_netdev(dev);
2438
2439	pci_iounmap(pdev, rp->base);
2440	pci_release_regions(pdev);
2441
2442	free_netdev(dev);
2443	pci_disable_device(pdev);
2444}
2445
2446static void rhine_remove_one_platform(struct platform_device *pdev)
2447{
2448	struct net_device *dev = platform_get_drvdata(pdev);
2449	struct rhine_private *rp = netdev_priv(dev);
2450
2451	unregister_netdev(dev);
2452
2453	iounmap(rp->base);
2454
2455	free_netdev(dev);
2456	}
2457
2458static void rhine_shutdown_pci(struct pci_dev *pdev)
2459{
2460	struct net_device *dev = pci_get_drvdata(pdev);
2461	struct rhine_private *rp = netdev_priv(dev);
2462	void __iomem *ioaddr = rp->base;
2463
2464	if (!(rp->quirks & rqWOL))
2465		return; /* Nothing to do for non-WOL adapters */
2466
2467	rhine_power_init(dev);
2468
2469	/* Make sure we use pattern 0, 1 and not 4, 5 */
2470	if (rp->quirks & rq6patterns)
2471		iowrite8(0x04, ioaddr + WOLcgClr);
2472
2473	spin_lock(&rp->lock);
2474
2475	if (rp->wolopts & WAKE_MAGIC) {
2476		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2477		/*
2478		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2479		 * not cooperate otherwise.
2480		 */
2481		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2482	}
2483
2484	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2485		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2486
2487	if (rp->wolopts & WAKE_PHY)
2488		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2489
2490	if (rp->wolopts & WAKE_UCAST)
2491		iowrite8(WOLucast, ioaddr + WOLcrSet);
2492
2493	if (rp->wolopts) {
2494		/* Enable legacy WOL (for old motherboards) */
2495		iowrite8(0x01, ioaddr + PwcfgSet);
2496		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2497	}
2498
2499	spin_unlock(&rp->lock);
2500
2501	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2502		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2503
2504		pci_wake_from_d3(pdev, true);
2505		pci_set_power_state(pdev, PCI_D3hot);
2506	}
2507}
2508
2509#ifdef CONFIG_PM_SLEEP
2510static int rhine_suspend(struct device *device)
2511{
2512	struct net_device *dev = dev_get_drvdata(device);
2513	struct rhine_private *rp = netdev_priv(dev);
2514
2515	if (!netif_running(dev))
2516		return 0;
2517
2518	rhine_task_disable(rp);
2519	rhine_irq_disable(rp);
2520	napi_disable(&rp->napi);
2521
2522	netif_device_detach(dev);
2523
2524	if (dev_is_pci(device))
2525		rhine_shutdown_pci(to_pci_dev(device));
2526
2527	return 0;
2528}
2529
2530static int rhine_resume(struct device *device)
2531{
2532	struct net_device *dev = dev_get_drvdata(device);
2533	struct rhine_private *rp = netdev_priv(dev);
2534
2535	if (!netif_running(dev))
2536		return 0;
2537
2538	enable_mmio(rp->pioaddr, rp->quirks);
2539	rhine_power_init(dev);
2540	free_tbufs(dev);
2541	alloc_tbufs(dev);
2542	rhine_reset_rbufs(rp);
2543	rhine_task_enable(rp);
2544	spin_lock_bh(&rp->lock);
2545	init_registers(dev);
2546	spin_unlock_bh(&rp->lock);
2547
2548	netif_device_attach(dev);
2549
2550	return 0;
2551}
2552
2553static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2554#define RHINE_PM_OPS	(&rhine_pm_ops)
2555
2556#else
2557
2558#define RHINE_PM_OPS	NULL
2559
2560#endif /* !CONFIG_PM_SLEEP */
2561
2562static struct pci_driver rhine_driver_pci = {
2563	.name		= DRV_NAME,
2564	.id_table	= rhine_pci_tbl,
2565	.probe		= rhine_init_one_pci,
2566	.remove		= rhine_remove_one_pci,
2567	.shutdown	= rhine_shutdown_pci,
2568	.driver.pm	= RHINE_PM_OPS,
2569};
2570
2571static struct platform_driver rhine_driver_platform = {
2572	.probe		= rhine_init_one_platform,
2573	.remove		= rhine_remove_one_platform,
2574	.driver = {
2575		.name	= DRV_NAME,
2576		.of_match_table	= rhine_of_tbl,
2577		.pm		= RHINE_PM_OPS,
2578	}
2579};
2580
2581static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2582	{
2583		.ident = "EPIA-M",
2584		.matches = {
2585			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2586			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2587		},
2588	},
2589	{
2590		.ident = "KV7",
2591		.matches = {
2592			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2593			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2594		},
2595	},
2596	{ NULL }
2597};
2598
2599static int __init rhine_init(void)
2600{
2601	int ret_pci, ret_platform;
2602
2604	if (dmi_check_system(rhine_dmi_table)) {
2605		/* these BIOSes fail at PXE boot if chip is in D3 */
2606		avoid_D3 = true;
2607		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2608	} else if (avoid_D3) {
2609		pr_info("avoid_D3 set\n");
2610	}
2611
2612	ret_pci = pci_register_driver(&rhine_driver_pci);
2613	ret_platform = platform_driver_register(&rhine_driver_platform);
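	/*
	 * Either bus may be unavailable in a given kernel; init succeeds
	 * if at least one of the two registrations worked.
	 */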
2614	if ((ret_pci < 0) && (ret_platform < 0))
2615		return ret_pci;
2616
2617	return 0;
2618}
2619
2621static void __exit rhine_cleanup(void)
2622{
2623	platform_driver_unregister(&rhine_driver_platform);
2624	pci_unregister_driver(&rhine_driver_pci);
2625}
2626
2628module_init(rhine_init);
2629module_exit(rhine_cleanup);
v5.4
   1/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
   2/*
   3	Written 1998-2001 by Donald Becker.
   4
   5	Current Maintainer: Roger Luethi <rl@hellgate.ch>
   6
   7	This software may be used and distributed according to the terms of
   8	the GNU General Public License (GPL), incorporated herein by reference.
   9	Drivers based on or derived from this code fall under the GPL and must
  10	retain the authorship, copyright and license notice.  This file is not
  11	a complete program and may only be used when the entire operating
  12	system is licensed under the GPL.
  13
  14	This driver is designed for the VIA VT86C100A Rhine-I.
  15	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
  16	and management NIC 6105M).
  17
  18	The author may be reached as becker@scyld.com, or C/O
  19	Scyld Computing Corporation
  20	410 Severn Ave., Suite 210
  21	Annapolis MD 21403
  22
  23
  24	This driver contains some changes from the original Donald Becker
  25	version. He may or may not be interested in bug reports on this
  26	code. You can find his versions at:
  27	http://www.scyld.com/network/via-rhine.html
  28	[link no longer provides useful info -jgarzik]
  29
  30*/
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#define DRV_NAME	"via-rhine"
  35#define DRV_VERSION	"1.5.1"
  36#define DRV_RELDATE	"2010-10-09"
  37
  38#include <linux/types.h>
  39
  40/* A few user-configurable values.
  41   These may be modified when a driver module is loaded. */
  42static int debug = 0;
  43#define RHINE_MSG_DEFAULT \
  44        (0x0000)
  45
  46/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
  47   Setting to > 1518 effectively disables this feature. */
  48#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
  49	defined(CONFIG_SPARC) || defined(__ia64__) ||		   \
  50	defined(__sh__) || defined(__mips__)
  51static int rx_copybreak = 1518;
  52#else
  53static int rx_copybreak;
  54#endif
  55
  56/* Work-around for broken BIOSes: they are unable to get the chip back out of
  57   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
  58static bool avoid_D3;
  59
  60/*
  61 * In case you are looking for 'options[]' or 'full_duplex[]', they
  62 * are gone. Use ethtool(8) instead.
  63 */
  64
  65/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
  66   The Rhine has a 64 element 8390-like hash table. */
  67static const int multicast_filter_limit = 32;
  68
  69
  70/* Operational parameters that are set at compile time. */
  71
  72/* Keep the ring sizes a power of two for compile efficiency.
  73 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
  74 * Making the Tx ring too large decreases the effectiveness of channel
  75 * bonding and packet priority.
  76 * With BQL support, we can increase TX ring safely.
  77 * There are no ill effects from too-large receive rings.
  78 */
  79#define TX_RING_SIZE	64
  80#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
  81#define RX_RING_SIZE	64
  82
  83/* Operational parameters that usually are not changed. */
  84
  85/* Time in jiffies before concluding the transmitter is hung. */
  86#define TX_TIMEOUT	(2*HZ)
  87
  88#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/
  89
  90#include <linux/module.h>
  91#include <linux/moduleparam.h>
  92#include <linux/kernel.h>
  93#include <linux/string.h>
  94#include <linux/timer.h>
  95#include <linux/errno.h>
  96#include <linux/ioport.h>
  97#include <linux/interrupt.h>
  98#include <linux/pci.h>
  99#include <linux/of_device.h>
 100#include <linux/of_irq.h>
 101#include <linux/platform_device.h>
 102#include <linux/dma-mapping.h>
 103#include <linux/netdevice.h>
 104#include <linux/etherdevice.h>
 105#include <linux/skbuff.h>
 106#include <linux/init.h>
 107#include <linux/delay.h>
 108#include <linux/mii.h>
 109#include <linux/ethtool.h>
 110#include <linux/crc32.h>
 111#include <linux/if_vlan.h>
 112#include <linux/bitops.h>
 113#include <linux/workqueue.h>
 114#include <asm/processor.h>	/* Processor type for cache alignment. */
 115#include <asm/io.h>
 116#include <asm/irq.h>
 117#include <linux/uaccess.h>
 118#include <linux/dmi.h>
 119
 120/* These identify the driver base version and may not be removed. */
 121static const char version[] =
 122	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
 123
 124MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
 125MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
 126MODULE_LICENSE("GPL");
 127
 128module_param(debug, int, 0);
 129module_param(rx_copybreak, int, 0);
 130module_param(avoid_D3, bool, 0);
 131MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
 132MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 133MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 134
 135#define MCAM_SIZE	32
 136#define VCAM_SIZE	32
 137
 138/*
 139		Theory of Operation
 140
 141I. Board Compatibility
 142
 143This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
 144controller.
 145
 146II. Board-specific settings
 147
 148Boards with this chip are functional only in a bus-master PCI slot.
 149
 150Many operational settings are loaded from the EEPROM to the Config word at
 151offset 0x78. For most of these settings, this driver assumes that they are
 152correct.
 153If this driver is compiled to use PCI memory space operations the EEPROM
 154must be configured to enable memory ops.
 155
 156III. Driver operation
 157
 158IIIa. Ring buffers
 159
 160This driver uses two statically allocated fixed-size descriptor lists
 161formed into rings by a branch from the final descriptor to the beginning of
 162the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
 163
 164IIIb/c. Transmit/Receive Structure
 165
 166This driver attempts to use a zero-copy receive and transmit scheme.
 167
 168Alas, all data buffers are required to start on a 32 bit boundary, so
 169the driver must often copy transmit packets into bounce buffers.
 170
 171The driver allocates full frame size skbuffs for the Rx ring buffers at
 172open() time and passes the skb->data field to the chip as receive data
 173buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
 174a fresh skbuff is allocated and the frame is copied to the new skbuff.
 175When the incoming frame is larger, the skbuff is passed directly up the
 176protocol stack. Buffers consumed this way are replaced by newly allocated
 177skbuffs in the last phase of rhine_rx().
 178
 179The RX_COPYBREAK value is chosen to trade-off the memory wasted by
 180using a full-sized skbuff for small frames vs. the copying costs of larger
 181frames. New boards are typically used in generously configured machines
 182and the underfilled buffers have negligible impact compared to the benefit of
 183a single allocation size, so the default value of zero results in never
 184copying packets. When copying is done, the cost is usually mitigated by using
 185a combined copy/checksum routine. Copying also preloads the cache, which is
 186most useful with small frames.
 187
 188Since the VIA chips are only able to transfer data to buffers on 32 bit
 189boundaries, the IP header at offset 14 in an ethernet frame isn't
 190longword aligned for further processing. Copying these unaligned buffers
 191has the beneficial effect of 16-byte aligning the IP header.
 192
 193IIId. Synchronization
 194
 195The driver runs as two independent, single-threaded flows of control. One
 196is the send-packet routine, which enforces single-threaded use by the
 197netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
 198which is single threaded by the hardware and interrupt handling software.
 199
 200The send packet thread has partial control over the Tx ring. It locks the
 201netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
 202the ring is not available it stops the transmit queue by
 203calling netif_stop_queue.
 204
 205The interrupt handler has exclusive control over the Rx ring and records stats
 206from the Tx ring. After reaping the stats, it marks the Tx queue entry as
 207empty by incrementing the dirty_tx mark. If at least half of the entries in
 208the Rx ring are available the transmit queue is woken up if it was stopped.
 209
 210IV. Notes
 211
 212IVb. References
 213
 214Preliminary VT86C100A manual from http://www.via.com.tw/
 215http://www.scyld.com/expert/100mbps.html
 216http://www.scyld.com/expert/NWay.html
 217ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
 218ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
 219
 220
 221IVc. Errata
 222
 223The VT86C100A manual is not reliable information.
 224The 3043 chip does not handle unaligned transmit or receive buffers, resulting
 225in significant performance degradation for bounce buffer copies on transmit
 226and unaligned IP headers on receive.
 227The chip does not pad to minimum transmit length.
 228
 229*/
 230
 231
 232/* This table drives the PCI probe routines. It's mostly boilerplate in all
 233   of the drivers, and will likely be provided by some future kernel.
 234   Note the matching code -- the first table entry matchs all 56** cards but
 235   second only the 1234 card.
 236*/
 237
 238enum rhine_revs {
 239	VT86C100A	= 0x00,
 240	VTunknown0	= 0x20,
 241	VT6102		= 0x40,
 242	VT8231		= 0x50,	/* Integrated MAC */
 243	VT8233		= 0x60,	/* Integrated MAC */
 244	VT8235		= 0x74,	/* Integrated MAC */
 245	VT8237		= 0x78,	/* Integrated MAC */
 246	VTunknown1	= 0x7C,
 247	VT6105		= 0x80,
 248	VT6105_B0	= 0x83,
 249	VT6105L		= 0x8A,
 250	VT6107		= 0x8C,
 251	VTunknown2	= 0x8E,
 252	VT6105M		= 0x90,	/* Management adapter */
 253};
 254
 255enum rhine_quirks {
 256	rqWOL		= 0x0001,	/* Wake-On-LAN support */
 257	rqForceReset	= 0x0002,
 258	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
 259	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
 260	rqRhineI	= 0x0100,	/* See comment below */
 261	rqIntPHY	= 0x0200,	/* Integrated PHY */
 262	rqMgmt		= 0x0400,	/* Management adapter */
 263	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
 264					 * switched from PIO mode to MMIO
 265					 * (only applies to PCI)
 266					 */
 267};
 268/*
 269 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 270 * MMIO as well as for the collision counter and the Tx FIFO underflow
 271 * indicator. In addition, Tx and Rx buffers need to 4 byte aligned.
 272 */
 273
 274/* Beware of PCI posted writes */
 275#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
 276
 277static const struct pci_device_id rhine_pci_tbl[] = {
 278	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
 279	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
 280	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
 281	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
 282	{ }	/* terminate list */
 283};
 284MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
 285
 286/* OpenFirmware identifiers for platform-bus devices
 287 * The .data field is currently only used to store quirks
 288 */
 289static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
 290static const struct of_device_id rhine_of_tbl[] = {
 291	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
 292	{ }	/* terminate list */
 293};
 294MODULE_DEVICE_TABLE(of, rhine_of_tbl);
 295
 296/* Offsets to the device registers. */
 297enum register_offsets {
 298	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
 299	ChipCmd1=0x09, TQWake=0x0A,
 300	IntrStatus=0x0C, IntrEnable=0x0E,
 301	MulticastFilter0=0x10, MulticastFilter1=0x14,
 302	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
 303	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 304	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 305	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 306	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 307	StickyHW=0x83, IntrStatus2=0x84,
 308	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 309	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 310	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 311	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
 312};
 313
 314/* Bits in ConfigD */
 315enum backoff_bits {
 316	BackOptional=0x01, BackModify=0x02,
 317	BackCaptureEffect=0x04, BackRandom=0x08
 318};
 319
 320/* Bits in the TxConfig (TCR) register */
 321enum tcr_bits {
 322	TCR_PQEN=0x01,
 323	TCR_LB0=0x02,		/* loopback[0] */
 324	TCR_LB1=0x04,		/* loopback[1] */
 325	TCR_OFSET=0x08,
 326	TCR_RTGOPT=0x10,
 327	TCR_RTFT0=0x20,
 328	TCR_RTFT1=0x40,
 329	TCR_RTSF=0x80,
 330};
 331
 332/* Bits in the CamCon (CAMC) register */
 333enum camcon_bits {
 334	CAMC_CAMEN=0x01,
 335	CAMC_VCAMSL=0x02,
 336	CAMC_CAMWR=0x04,
 337	CAMC_CAMRD=0x08,
 338};
 339
 340/* Bits in the PCIBusConfig1 (BCR1) register */
 341enum bcr1_bits {
 342	BCR1_POT0=0x01,
 343	BCR1_POT1=0x02,
 344	BCR1_POT2=0x04,
 345	BCR1_CTFT0=0x08,
 346	BCR1_CTFT1=0x10,
 347	BCR1_CTSF=0x20,
 348	BCR1_TXQNOBK=0x40,	/* for VT6105 */
 349	BCR1_VIDFR=0x80,	/* for VT6105 */
 350	BCR1_MED0=0x40,		/* for VT6102 */
 351	BCR1_MED1=0x80,		/* for VT6102 */
 352};
 353
 354/* Registers we check that mmio and reg are the same. */
 355static const int mmio_verify_registers[] = {
 356	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
 357	0
 358};
 359
 360/* Bits in the interrupt status/mask registers. */
 361enum intr_status_bits {
 362	IntrRxDone	= 0x0001,
 363	IntrTxDone	= 0x0002,
 364	IntrRxErr	= 0x0004,
 365	IntrTxError	= 0x0008,
 366	IntrRxEmpty	= 0x0020,
 367	IntrPCIErr	= 0x0040,
 368	IntrStatsMax	= 0x0080,
 369	IntrRxEarly	= 0x0100,
 370	IntrTxUnderrun	= 0x0210,
 371	IntrRxOverflow	= 0x0400,
 372	IntrRxDropped	= 0x0800,
 373	IntrRxNoBuf	= 0x1000,
 374	IntrTxAborted	= 0x2000,
 375	IntrLinkChange	= 0x4000,
 376	IntrRxWakeUp	= 0x8000,
 377	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
 378	IntrNormalSummary	= IntrRxDone | IntrTxDone,
 379	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
 380				  IntrTxUnderrun,
 381};
 382
 383/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
 384enum wol_bits {
 385	WOLucast	= 0x10,
 386	WOLmagic	= 0x20,
 387	WOLbmcast	= 0x30,
 388	WOLlnkon	= 0x40,
 389	WOLlnkoff	= 0x80,
 390};
 391
 392/* The Rx and Tx buffer descriptors. */
 393struct rx_desc {
 394	__le32 rx_status;
 395	__le32 desc_length; /* Chain flag, Buffer/frame length */
 396	__le32 addr;
 397	__le32 next_desc;
 398};
 399struct tx_desc {
 400	__le32 tx_status;
 401	__le32 desc_length; /* Chain flag, Tx Config, Frame length */
 402	__le32 addr;
 403	__le32 next_desc;
 404};
 405
 406/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
 407#define TXDESC		0x00e08000
 408
 409enum rx_status_bits {
 410	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
 411};
 412
 413/* Bits in *_desc.*_status */
 414enum desc_status_bits {
 415	DescOwn=0x80000000
 416};
 417
 418/* Bits in *_desc.*_length */
 419enum desc_length_bits {
 420	DescTag=0x00010000
 421};
 422
 423/* Bits in ChipCmd. */
 424enum chip_cmd_bits {
 425	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
 426	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
 427	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
 428	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
 429};
 430
 431struct rhine_stats {
 432	u64		packets;
 433	u64		bytes;
 434	struct u64_stats_sync syncp;
 435};
 436
 437struct rhine_private {
 438	/* Bit mask for configured VLAN ids */
 439	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 440
 441	/* Descriptor rings */
 442	struct rx_desc *rx_ring;
 443	struct tx_desc *tx_ring;
 444	dma_addr_t rx_ring_dma;
 445	dma_addr_t tx_ring_dma;
 446
 447	/* The addresses of receive-in-place skbuffs. */
 448	struct sk_buff *rx_skbuff[RX_RING_SIZE];
 449	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
 450
 451	/* The saved address of a sent-in-place packet/buffer, for later free(). */
 452	struct sk_buff *tx_skbuff[TX_RING_SIZE];
 453	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
 454
 455	/* Tx bounce buffers (Rhine-I only) */
 456	unsigned char *tx_buf[TX_RING_SIZE];
 457	unsigned char *tx_bufs;
 458	dma_addr_t tx_bufs_dma;
 459
 460	int irq;
 461	long pioaddr;
 462	struct net_device *dev;
 463	struct napi_struct napi;
 464	spinlock_t lock;
 465	struct mutex task_lock;
 466	bool task_enable;
 467	struct work_struct slow_event_task;
 468	struct work_struct reset_task;
 469
 470	u32 msg_enable;
 471
 472	/* Frequently used values: keep some adjacent for cache effect. */
 473	u32 quirks;
 474	unsigned int cur_rx;
 475	unsigned int cur_tx, dirty_tx;
 476	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
 477	struct rhine_stats rx_stats;
 478	struct rhine_stats tx_stats;
 479	u8 wolopts;
 480
 481	u8 tx_thresh, rx_thresh;
 482
 483	struct mii_if_info mii_if;
 484	void __iomem *base;
 485};
 486
 487#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
 488#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
 489#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
 490
 491#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
 492#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
 493#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
 494
 495#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
 496#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
 497#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
 498
 499#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
 500#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
 501#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
 502
 503
 504static int  mdio_read(struct net_device *dev, int phy_id, int location);
 505static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 506static int  rhine_open(struct net_device *dev);
 507static void rhine_reset_task(struct work_struct *work);
 508static void rhine_slow_event_task(struct work_struct *work);
 509static void rhine_tx_timeout(struct net_device *dev);
 510static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 511				  struct net_device *dev);
 512static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 513static void rhine_tx(struct net_device *dev);
 514static int rhine_rx(struct net_device *dev, int limit);
 515static void rhine_set_rx_mode(struct net_device *dev);
 516static void rhine_get_stats64(struct net_device *dev,
 517			      struct rtnl_link_stats64 *stats);
 518static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 519static const struct ethtool_ops netdev_ethtool_ops;
 520static int  rhine_close(struct net_device *dev);
 521static int rhine_vlan_rx_add_vid(struct net_device *dev,
 522				 __be16 proto, u16 vid);
 523static int rhine_vlan_rx_kill_vid(struct net_device *dev,
 524				  __be16 proto, u16 vid);
 525static void rhine_restart_tx(struct net_device *dev);
 526
 527static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
 528{
 529	void __iomem *ioaddr = rp->base;
 530	int i;
 531
 532	for (i = 0; i < 1024; i++) {
 533		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
 534
 535		if (low ^ has_mask_bits)
 536			break;
 537		udelay(10);
 538	}
 539	if (i > 64) {
 540		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
 541			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
 542	}
 543}
 544
 545static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
 546{
 547	rhine_wait_bit(rp, reg, mask, false);
 548}
 549
 550static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
 551{
 552	rhine_wait_bit(rp, reg, mask, true);
 553}
 554
 555static u32 rhine_get_events(struct rhine_private *rp)
 556{
 557	void __iomem *ioaddr = rp->base;
 558	u32 intr_status;
 559
 560	intr_status = ioread16(ioaddr + IntrStatus);
 561	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
 562	if (rp->quirks & rqStatusWBRace)
 563		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
 564	return intr_status;
 565}
 566
 567static void rhine_ack_events(struct rhine_private *rp, u32 mask)
 568{
 569	void __iomem *ioaddr = rp->base;
 570
 571	if (rp->quirks & rqStatusWBRace)
 572		iowrite8(mask >> 16, ioaddr + IntrStatus2);
 573	iowrite16(mask, ioaddr + IntrStatus);
 574}
 575
 576/*
 577 * Get power related registers into sane state.
 578 * Notify user about past WOL event.
 579 */
 580static void rhine_power_init(struct net_device *dev)
 581{
 582	struct rhine_private *rp = netdev_priv(dev);
 583	void __iomem *ioaddr = rp->base;
 584	u16 wolstat;
 585
 586	if (rp->quirks & rqWOL) {
 587		/* Make sure chip is in power state D0 */
 588		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
 589
 590		/* Disable "force PME-enable" */
 591		iowrite8(0x80, ioaddr + WOLcgClr);
 592
 593		/* Clear power-event config bits (WOL) */
 594		iowrite8(0xFF, ioaddr + WOLcrClr);
 595		/* More recent cards can manage two additional patterns */
 596		if (rp->quirks & rq6patterns)
 597			iowrite8(0x03, ioaddr + WOLcrClr1);
 598
 599		/* Save power-event status bits */
 600		wolstat = ioread8(ioaddr + PwrcsrSet);
 601		if (rp->quirks & rq6patterns)
 602			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
 603
 604		/* Clear power-event status bits */
 605		iowrite8(0xFF, ioaddr + PwrcsrClr);
 606		if (rp->quirks & rq6patterns)
 607			iowrite8(0x03, ioaddr + PwrcsrClr1);
 608
 609		if (wolstat) {
 610			char *reason;
 611			switch (wolstat) {
 612			case WOLmagic:
 613				reason = "Magic packet";
 614				break;
 615			case WOLlnkon:
 616				reason = "Link went up";
 617				break;
 618			case WOLlnkoff:
 619				reason = "Link went down";
 620				break;
 621			case WOLucast:
 622				reason = "Unicast packet";
 623				break;
 624			case WOLbmcast:
 625				reason = "Multicast/broadcast packet";
 626				break;
 627			default:
 628				reason = "Unknown";
 629			}
 630			netdev_info(dev, "Woke system up. Reason: %s\n",
 631				    reason);
 632		}
 633	}
 634}
 635
 636static void rhine_chip_reset(struct net_device *dev)
 637{
 638	struct rhine_private *rp = netdev_priv(dev);
 639	void __iomem *ioaddr = rp->base;
 640	u8 cmd1;
 641
 642	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 643	IOSYNC;
 644
 645	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
 646		netdev_info(dev, "Reset not complete yet. Trying harder.\n");
 647
 648		/* Force reset */
 649		if (rp->quirks & rqForceReset)
 650			iowrite8(0x40, ioaddr + MiscCmd);
 651
 652		/* Reset can take somewhat longer (rare) */
 653		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 654	}
 655
 656	cmd1 = ioread8(ioaddr + ChipCmd1);
 657	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
 658		   "failed" : "succeeded");
 659}
 660
 661static void enable_mmio(long pioaddr, u32 quirks)
 662{
 663	int n;
 664
 665	if (quirks & rqNeedEnMMIO) {
 666		if (quirks & rqRhineI) {
 667			/* More recent docs say that this bit is reserved */
 668			n = inb(pioaddr + ConfigA) | 0x20;
 669			outb(n, pioaddr + ConfigA);
 670		} else {
 671			n = inb(pioaddr + ConfigD) | 0x80;
 672			outb(n, pioaddr + ConfigD);
 673		}
 674	}
 675}
 676
 677static inline int verify_mmio(struct device *hwdev,
 678			      long pioaddr,
 679			      void __iomem *ioaddr,
 680			      u32 quirks)
 681{
 682	if (quirks & rqNeedEnMMIO) {
 683		int i = 0;
 684
 685		/* Check that selected MMIO registers match the PIO ones */
 686		while (mmio_verify_registers[i]) {
 687			int reg = mmio_verify_registers[i++];
 688			unsigned char a = inb(pioaddr+reg);
 689			unsigned char b = readb(ioaddr+reg);
 690
 691			if (a != b) {
 692				dev_err(hwdev,
 693					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
 694					reg, a, b);
 695				return -EIO;
 696			}
 697		}
 698	}
 699	return 0;
 700}
 701
 702/*
 703 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 704 * (plus 0x6C for Rhine-I/II)
 705 */
 706static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
 707{
 708	struct rhine_private *rp = netdev_priv(dev);
 709	void __iomem *ioaddr = rp->base;
 710	int i;
 711
 712	outb(0x20, pioaddr + MACRegEEcsr);
 713	for (i = 0; i < 1024; i++) {
 714		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
 715			break;
 716	}
 717	if (i > 512)
 718		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
 719
 720	/*
 721	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
 722	 * MMIO. If reloading EEPROM was done first this could be avoided, but
 723	 * it is not known if that still works with the "win98-reboot" problem.
 724	 */
 725	enable_mmio(pioaddr, rp->quirks);
 726
 727	/* Turn off EEPROM-controlled wake-up (magic packet) */
 728	if (rp->quirks & rqWOL)
 729		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
 730
 731}
 732
 733#ifdef CONFIG_NET_POLL_CONTROLLER
 734static void rhine_poll(struct net_device *dev)
 735{
 736	struct rhine_private *rp = netdev_priv(dev);
 737	const int irq = rp->irq;
 738
 739	disable_irq(irq);
 740	rhine_interrupt(irq, dev);
 741	enable_irq(irq);
 742}
 743#endif
 744
 745static void rhine_kick_tx_threshold(struct rhine_private *rp)
 746{
 747	if (rp->tx_thresh < 0xe0) {
 748		void __iomem *ioaddr = rp->base;
 749
 750		rp->tx_thresh += 0x20;
 751		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 752	}
 753}
 754
 755static void rhine_tx_err(struct rhine_private *rp, u32 status)
 756{
 757	struct net_device *dev = rp->dev;
 758
 759	if (status & IntrTxAborted) {
 760		netif_info(rp, tx_err, dev,
 761			   "Abort %08x, frame dropped\n", status);
 762	}
 763
 764	if (status & IntrTxUnderrun) {
 765		rhine_kick_tx_threshold(rp);
 766		netif_info(rp, tx_err ,dev, "Transmitter underrun, "
 767			   "Tx threshold now %02x\n", rp->tx_thresh);
 768	}
 769
 770	if (status & IntrTxDescRace)
 771		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
 772
 773	if ((status & IntrTxError) &&
 774	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
 775		rhine_kick_tx_threshold(rp);
 776		netif_info(rp, tx_err, dev, "Unspecified error. "
 777			   "Tx threshold now %02x\n", rp->tx_thresh);
 778	}
 779
 780	rhine_restart_tx(dev);
 781}
 782
 783static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
 784{
 785	void __iomem *ioaddr = rp->base;
 786	struct net_device_stats *stats = &rp->dev->stats;
 787
 788	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
 789	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
 790
 791	/*
 792	 * Clears the "tally counters" for CRC errors and missed frames(?).
 793	 * It has been reported that some chips need a write of 0 to clear
 794	 * these, for others the counters are set to 1 when written to and
 795	 * instead cleared when read. So we clear them both ways ...
 796	 */
 797	iowrite32(0, ioaddr + RxMissed);
 798	ioread16(ioaddr + RxCRCErrs);
 799	ioread16(ioaddr + RxMissed);
 800}
 801
 802#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
 803				 IntrRxErr | \
 804				 IntrRxEmpty | \
 805				 IntrRxOverflow	| \
 806				 IntrRxDropped | \
 807				 IntrRxNoBuf | \
 808				 IntrRxWakeUp)
 809
 810#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
 811				 IntrTxAborted | \
 812				 IntrTxUnderrun | \
 813				 IntrTxDescRace)
 814#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
 815
 816#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
 817				 RHINE_EVENT_NAPI_TX | \
 818				 IntrStatsMax)
 819#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
 820#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
 821
 822static int rhine_napipoll(struct napi_struct *napi, int budget)
 823{
 824	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
 825	struct net_device *dev = rp->dev;
 826	void __iomem *ioaddr = rp->base;
 827	u16 enable_mask = RHINE_EVENT & 0xffff;
 828	int work_done = 0;
 829	u32 status;
 830
 831	status = rhine_get_events(rp);
 832	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
 833
 834	if (status & RHINE_EVENT_NAPI_RX)
 835		work_done += rhine_rx(dev, budget);
 836
 837	if (status & RHINE_EVENT_NAPI_TX) {
 838		if (status & RHINE_EVENT_NAPI_TX_ERR) {
 839			/* Avoid scavenging before Tx engine turned off */
 840			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
 841			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
 842				netif_warn(rp, tx_err, dev, "Tx still on\n");
 843		}
 844
 845		rhine_tx(dev);
 846
 847		if (status & RHINE_EVENT_NAPI_TX_ERR)
 848			rhine_tx_err(rp, status);
 849	}
 850
 851	if (status & IntrStatsMax) {
 852		spin_lock(&rp->lock);
 853		rhine_update_rx_crc_and_missed_errord(rp);
 854		spin_unlock(&rp->lock);
 855	}
 856
 857	if (status & RHINE_EVENT_SLOW) {
 858		enable_mask &= ~RHINE_EVENT_SLOW;
 859		schedule_work(&rp->slow_event_task);
 860	}
 861
 862	if (work_done < budget) {
 863		napi_complete_done(napi, work_done);
 864		iowrite16(enable_mask, ioaddr + IntrEnable);
 865	}
 866	return work_done;
 867}
 868
 869static void rhine_hw_init(struct net_device *dev, long pioaddr)
 870{
 871	struct rhine_private *rp = netdev_priv(dev);
 872
 873	/* Reset the chip to erase previous misconfiguration. */
 874	rhine_chip_reset(dev);
 875
 876	/* Rhine-I needs extra time to recuperate before EEPROM reload */
 877	if (rp->quirks & rqRhineI)
 878		msleep(5);
 879
 880	/* Reload EEPROM controlled bytes cleared by soft reset */
 881	if (dev_is_pci(dev->dev.parent))
 882		rhine_reload_eeprom(pioaddr, dev);
 883}
 884
 885static const struct net_device_ops rhine_netdev_ops = {
 886	.ndo_open		 = rhine_open,
 887	.ndo_stop		 = rhine_close,
 888	.ndo_start_xmit		 = rhine_start_tx,
 889	.ndo_get_stats64	 = rhine_get_stats64,
 890	.ndo_set_rx_mode	 = rhine_set_rx_mode,
 891	.ndo_validate_addr	 = eth_validate_addr,
 892	.ndo_set_mac_address 	 = eth_mac_addr,
 893	.ndo_do_ioctl		 = netdev_ioctl,
 894	.ndo_tx_timeout 	 = rhine_tx_timeout,
 895	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
 896	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
 897#ifdef CONFIG_NET_POLL_CONTROLLER
 898	.ndo_poll_controller	 = rhine_poll,
 899#endif
 900};
 901
 902static int rhine_init_one_common(struct device *hwdev, u32 quirks,
 903				 long pioaddr, void __iomem *ioaddr, int irq)
 904{
 905	struct net_device *dev;
 906	struct rhine_private *rp;
 907	int i, rc, phy_id;
 
 908	const char *name;
 909
 910	/* this should always be supported */
 911	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
 912	if (rc) {
 913		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
 914		goto err_out;
 915	}
 916
 917	dev = alloc_etherdev(sizeof(struct rhine_private));
 918	if (!dev) {
 919		rc = -ENOMEM;
 920		goto err_out;
 921	}
 922	SET_NETDEV_DEV(dev, hwdev);
 923
 924	rp = netdev_priv(dev);
 925	rp->dev = dev;
 926	rp->quirks = quirks;
 927	rp->pioaddr = pioaddr;
 928	rp->base = ioaddr;
 929	rp->irq = irq;
 930	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
 931
 932	phy_id = rp->quirks & rqIntPHY ? 1 : 0;
 933
 934	u64_stats_init(&rp->tx_stats.syncp);
 935	u64_stats_init(&rp->rx_stats.syncp);
 936
 937	/* Get chip registers into a sane state */
 938	rhine_power_init(dev);
 939	rhine_hw_init(dev, pioaddr);
 940
 941	for (i = 0; i < 6; i++)
 942		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
 
 943
 944	if (!is_valid_ether_addr(dev->dev_addr)) {
 945		/* Report it and use a random ethernet address instead */
 946		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
 947		eth_hw_addr_random(dev);
 948		netdev_info(dev, "Using random MAC address: %pM\n",
 949			    dev->dev_addr);
 950	}
 951
 952	/* For Rhine-I/II, phy_id is loaded from EEPROM */
 953	if (!phy_id)
 954		phy_id = ioread8(ioaddr + 0x6C);
 955
 956	spin_lock_init(&rp->lock);
 957	mutex_init(&rp->task_lock);
 958	INIT_WORK(&rp->reset_task, rhine_reset_task);
 959	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
 960
 961	rp->mii_if.dev = dev;
 962	rp->mii_if.mdio_read = mdio_read;
 963	rp->mii_if.mdio_write = mdio_write;
 964	rp->mii_if.phy_id_mask = 0x1f;
 965	rp->mii_if.reg_num_mask = 0x1f;
 966
 967	/* The chip-specific entries in the device structure. */
 968	dev->netdev_ops = &rhine_netdev_ops;
 969	dev->ethtool_ops = &netdev_ethtool_ops;
 970	dev->watchdog_timeo = TX_TIMEOUT;
 971
	netif_napi_add(dev, &rp->napi, rhine_napipoll);
 973
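	/* Rhine-I bounces every frame through the aligned tx_bufs area in
	 * rhine_start_tx(), which is presumably why it can advertise SG and
	 * HW_CSUM: skb_copy_and_csum_dev() checksums during that copy. */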
 974	if (rp->quirks & rqRhineI)
 975		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 976
 977	if (rp->quirks & rqMgmt)
 978		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 979				 NETIF_F_HW_VLAN_CTAG_RX |
 980				 NETIF_F_HW_VLAN_CTAG_FILTER;
 981
 982	/* dev->name not defined before register_netdev()! */
 983	rc = register_netdev(dev);
 984	if (rc)
 985		goto err_out_free_netdev;
 986
 987	if (rp->quirks & rqRhineI)
 988		name = "Rhine";
 989	else if (rp->quirks & rqStatusWBRace)
 990		name = "Rhine II";
 991	else if (rp->quirks & rqMgmt)
 992		name = "Rhine III (Management Adapter)";
 993	else
 994		name = "Rhine III";
 995
 996	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
 997		    name, ioaddr, dev->dev_addr, rp->irq);
 998
 999	dev_set_drvdata(hwdev, dev);
1000
1001	{
1002		u16 mii_cmd;
1003		int mii_status = mdio_read(dev, phy_id, 1);
1004		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1005		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1006		if (mii_status != 0xffff && mii_status != 0x0000) {
1007			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1008			netdev_info(dev,
1009				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1010				    phy_id,
1011				    mii_status, rp->mii_if.advertising,
1012				    mdio_read(dev, phy_id, 5));
1013
1014			/* set IFF_RUNNING */
1015			if (mii_status & BMSR_LSTATUS)
1016				netif_carrier_on(dev);
1017			else
1018				netif_carrier_off(dev);
1019
1020		}
1021	}
1022	rp->mii_if.phy_id = phy_id;
1023	if (avoid_D3)
1024		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1025
1026	return 0;
1027
1028err_out_free_netdev:
1029	free_netdev(dev);
1030err_out:
1031	return rc;
1032}
1033
1034static int rhine_init_one_pci(struct pci_dev *pdev,
1035			      const struct pci_device_id *ent)
1036{
1037	struct device *hwdev = &pdev->dev;
1038	int rc;
1039	long pioaddr, memaddr;
1040	void __iomem *ioaddr;
1041	int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1042
1043/* This driver was written to use PCI memory space. Some early versions
1044 * of the Rhine may only work correctly with I/O space accesses.
1045 * TODO: determine for which revisions this is true and assign the flag
1046 *	 in code as opposed to this Kconfig option (???)
1047 */
1048#ifdef CONFIG_VIA_RHINE_MMIO
1049	u32 quirks = rqNeedEnMMIO;
1050#else
1051	u32 quirks = 0;
1052#endif
1058
1059	rc = pci_enable_device(pdev);
1060	if (rc)
1061		goto err_out;
1062
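	/* Map PCI revision to chip-generation quirks: pre-VTunknown0 is
	 * Rhine-I; VT6102 and later gain WOL and forced reset, with the
	 * Rhine-II status-writeback race below VT6105 and the integrated
	 * PHY, extra WOL patterns (B0) and management features (6105M)
	 * at or above it. */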
1063	if (pdev->revision < VTunknown0) {
1064		quirks |= rqRhineI;
1065	} else if (pdev->revision >= VT6102) {
1066		quirks |= rqWOL | rqForceReset;
1067		if (pdev->revision < VT6105) {
1068			quirks |= rqStatusWBRace;
1069		} else {
1070			quirks |= rqIntPHY;
1071			if (pdev->revision >= VT6105_B0)
1072				quirks |= rq6patterns;
1073			if (pdev->revision >= VT6105M)
1074				quirks |= rqMgmt;
1075		}
1076	}
1077
1078	/* sanity check */
1079	if ((pci_resource_len(pdev, 0) < io_size) ||
1080	    (pci_resource_len(pdev, 1) < io_size)) {
1081		rc = -EIO;
1082		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1083		goto err_out_pci_disable;
1084	}
1085
1086	pioaddr = pci_resource_start(pdev, 0);
1087	memaddr = pci_resource_start(pdev, 1);
1088
1089	pci_set_master(pdev);
1090
1091	rc = pci_request_regions(pdev, DRV_NAME);
1092	if (rc)
1093		goto err_out_pci_disable;
1094
1095	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1096	if (!ioaddr) {
1097		rc = -EIO;
1098		dev_err(hwdev,
1099			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1100			dev_name(hwdev), io_size, memaddr);
1101		goto err_out_free_res;
1102	}
1103
1104	enable_mmio(pioaddr, quirks);
1105
1106	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1107	if (rc)
1108		goto err_out_unmap;
1109
1110	rc = rhine_init_one_common(&pdev->dev, quirks,
1111				   pioaddr, ioaddr, pdev->irq);
1112	if (!rc)
1113		return 0;
1114
1115err_out_unmap:
1116	pci_iounmap(pdev, ioaddr);
1117err_out_free_res:
1118	pci_release_regions(pdev);
1119err_out_pci_disable:
1120	pci_disable_device(pdev);
1121err_out:
1122	return rc;
1123}
1124
1125static int rhine_init_one_platform(struct platform_device *pdev)
1126{
1127	const struct of_device_id *match;
1128	const u32 *quirks;
1129	int irq;
1130	void __iomem *ioaddr;
1131
1132	match = of_match_device(rhine_of_tbl, &pdev->dev);
1133	if (!match)
1134		return -EINVAL;
1135
1136	ioaddr = devm_platform_ioremap_resource(pdev, 0);
1137	if (IS_ERR(ioaddr))
1138		return PTR_ERR(ioaddr);
1139
1140	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1141	if (!irq)
1142		return -EINVAL;
1143
1144	quirks = match->data;
1145	if (!quirks)
1146		return -EINVAL;
1147
1148	return rhine_init_one_common(&pdev->dev, *quirks,
1149				     (long)ioaddr, ioaddr, irq);
1150}
1151
1152static int alloc_ring(struct net_device* dev)
1153{
1154	struct rhine_private *rp = netdev_priv(dev);
1155	struct device *hwdev = dev->dev.parent;
1156	void *ring;
1157	dma_addr_t ring_dma;
1158
1159	ring = dma_alloc_coherent(hwdev,
1160				  RX_RING_SIZE * sizeof(struct rx_desc) +
1161				  TX_RING_SIZE * sizeof(struct tx_desc),
1162				  &ring_dma,
1163				  GFP_ATOMIC);
1164	if (!ring) {
1165		netdev_err(dev, "Could not allocate DMA memory\n");
1166		return -ENOMEM;
1167	}
1168	if (rp->quirks & rqRhineI) {
1169		rp->tx_bufs = dma_alloc_coherent(hwdev,
1170						 PKT_BUF_SZ * TX_RING_SIZE,
1171						 &rp->tx_bufs_dma,
1172						 GFP_ATOMIC);
1173		if (rp->tx_bufs == NULL) {
1174			dma_free_coherent(hwdev,
1175					  RX_RING_SIZE * sizeof(struct rx_desc) +
1176					  TX_RING_SIZE * sizeof(struct tx_desc),
1177					  ring, ring_dma);
1178			return -ENOMEM;
1179		}
1180	}
1181
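	/* One coherent allocation backs both rings: Rx descriptors first,
	 * Tx descriptors immediately behind them. */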
1182	rp->rx_ring = ring;
1183	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1184	rp->rx_ring_dma = ring_dma;
1185	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1186
1187	return 0;
1188}
1189
1190static void free_ring(struct net_device* dev)
1191{
1192	struct rhine_private *rp = netdev_priv(dev);
1193	struct device *hwdev = dev->dev.parent;
1194
1195	dma_free_coherent(hwdev,
1196			  RX_RING_SIZE * sizeof(struct rx_desc) +
1197			  TX_RING_SIZE * sizeof(struct tx_desc),
1198			  rp->rx_ring, rp->rx_ring_dma);
1199	rp->tx_ring = NULL;
1200
1201	if (rp->tx_bufs)
1202		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1203				  rp->tx_bufs, rp->tx_bufs_dma);
1204
1205	rp->tx_bufs = NULL;
1206
1207}
1208
1209struct rhine_skb_dma {
1210	struct sk_buff *skb;
1211	dma_addr_t dma;
1212};
1213
1214static inline int rhine_skb_dma_init(struct net_device *dev,
1215				     struct rhine_skb_dma *sd)
1216{
1217	struct rhine_private *rp = netdev_priv(dev);
1218	struct device *hwdev = dev->dev.parent;
1219	const int size = rp->rx_buf_sz;
1220
1221	sd->skb = netdev_alloc_skb(dev, size);
1222	if (!sd->skb)
1223		return -ENOMEM;
1224
1225	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1226	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1227		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1228		dev_kfree_skb_any(sd->skb);
1229		return -EIO;
1230	}
1231
1232	return 0;
1233}
1234
1235static void rhine_reset_rbufs(struct rhine_private *rp)
1236{
1237	int i;
1238
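	/* Restart reception at slot 0 and hand every descriptor back
	 * to the NIC. */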
1239	rp->cur_rx = 0;
1240
1241	for (i = 0; i < RX_RING_SIZE; i++)
1242		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1243}
1244
1245static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1246					   struct rhine_skb_dma *sd, int entry)
1247{
1248	rp->rx_skbuff_dma[entry] = sd->dma;
1249	rp->rx_skbuff[entry] = sd->skb;
1250
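	/* Make the buffer address visible to the device before a later
	 * rx_status write hands the descriptor over via DescOwn. */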
1251	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1252	dma_wmb();
1253}
1254
1255static void free_rbufs(struct net_device* dev);
1256
1257static int alloc_rbufs(struct net_device *dev)
1258{
1259	struct rhine_private *rp = netdev_priv(dev);
1260	dma_addr_t next;
1261	int rc, i;
1262
1263	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1264	next = rp->rx_ring_dma;
1265
1266	/* Init the ring entries */
1267	for (i = 0; i < RX_RING_SIZE; i++) {
1268		rp->rx_ring[i].rx_status = 0;
1269		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1270		next += sizeof(struct rx_desc);
1271		rp->rx_ring[i].next_desc = cpu_to_le32(next);
1272		rp->rx_skbuff[i] = NULL;
1273	}
1274	/* Mark the last entry as wrapping the ring. */
1275	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1276
1277	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1278	for (i = 0; i < RX_RING_SIZE; i++) {
1279		struct rhine_skb_dma sd;
1280
1281		rc = rhine_skb_dma_init(dev, &sd);
1282		if (rc < 0) {
1283			free_rbufs(dev);
1284			goto out;
1285		}
1286
1287		rhine_skb_dma_nic_store(rp, &sd, i);
1288	}
1289
1290	rhine_reset_rbufs(rp);
1291out:
1292	return rc;
1293}
1294
1295static void free_rbufs(struct net_device* dev)
1296{
1297	struct rhine_private *rp = netdev_priv(dev);
1298	struct device *hwdev = dev->dev.parent;
1299	int i;
1300
1301	/* Free all the skbuffs in the Rx queue. */
1302	for (i = 0; i < RX_RING_SIZE; i++) {
1303		rp->rx_ring[i].rx_status = 0;
1304		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1305		if (rp->rx_skbuff[i]) {
1306			dma_unmap_single(hwdev,
1307					 rp->rx_skbuff_dma[i],
1308					 rp->rx_buf_sz, DMA_FROM_DEVICE);
1309			dev_kfree_skb(rp->rx_skbuff[i]);
1310		}
1311		rp->rx_skbuff[i] = NULL;
1312	}
1313}
1314
1315static void alloc_tbufs(struct net_device* dev)
1316{
1317	struct rhine_private *rp = netdev_priv(dev);
1318	dma_addr_t next;
1319	int i;
1320
1321	rp->dirty_tx = rp->cur_tx = 0;
1322	next = rp->tx_ring_dma;
1323	for (i = 0; i < TX_RING_SIZE; i++) {
1324		rp->tx_skbuff[i] = NULL;
1325		rp->tx_ring[i].tx_status = 0;
1326		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1327		next += sizeof(struct tx_desc);
1328		rp->tx_ring[i].next_desc = cpu_to_le32(next);
1329		if (rp->quirks & rqRhineI)
1330			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1331	}
1332	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1333
1334	netdev_reset_queue(dev);
1335}
1336
1337static void free_tbufs(struct net_device* dev)
1338{
1339	struct rhine_private *rp = netdev_priv(dev);
1340	struct device *hwdev = dev->dev.parent;
1341	int i;
1342
1343	for (i = 0; i < TX_RING_SIZE; i++) {
1344		rp->tx_ring[i].tx_status = 0;
1345		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1346		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1347		if (rp->tx_skbuff[i]) {
1348			if (rp->tx_skbuff_dma[i]) {
1349				dma_unmap_single(hwdev,
1350						 rp->tx_skbuff_dma[i],
1351						 rp->tx_skbuff[i]->len,
1352						 DMA_TO_DEVICE);
1353			}
1354			dev_kfree_skb(rp->tx_skbuff[i]);
1355		}
1356		rp->tx_skbuff[i] = NULL;
1357		rp->tx_buf[i] = NULL;
1358	}
1359}
1360
1361static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1362{
1363	struct rhine_private *rp = netdev_priv(dev);
1364	void __iomem *ioaddr = rp->base;
1365
1366	if (!rp->mii_if.force_media)
1367		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1368
1369	if (rp->mii_if.full_duplex)
1370	    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1371		   ioaddr + ChipCmd1);
1372	else
1373	    iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1374		   ioaddr + ChipCmd1);
1375
1376	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1377		   rp->mii_if.force_media, netif_carrier_ok(dev));
1378}
1379
1380/* Called after status of force_media possibly changed */
1381static void rhine_set_carrier(struct mii_if_info *mii)
1382{
1383	struct net_device *dev = mii->dev;
1384	struct rhine_private *rp = netdev_priv(dev);
1385
1386	if (mii->force_media) {
1387		/* autoneg is off: Link is always assumed to be up */
1388		if (!netif_carrier_ok(dev))
1389			netif_carrier_on(dev);
1390	}
1391
1392	rhine_check_media(dev, 0);
1393
1394	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1395		   mii->force_media, netif_carrier_ok(dev));
1396}
1397
1398/**
1399 * rhine_set_cam - set CAM multicast filters
1400 * @ioaddr: register block of this Rhine
1401 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1402 * @addr: multicast address (6 bytes)
1403 *
1404 * Load addresses into multicast filters.
1405 */
1406static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1407{
1408	int i;
1409
1410	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1411	wmb();
1412
1413	/* Paranoid -- idx out of range should never happen */
1414	idx &= (MCAM_SIZE - 1);
1415
1416	iowrite8((u8) idx, ioaddr + CamAddr);
1417
1418	for (i = 0; i < 6; i++, addr++)
1419		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1420	udelay(10);
1421	wmb();
1422
1423	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1424	udelay(10);
1425
1426	iowrite8(0, ioaddr + CamCon);
1427}
1428
1429/**
1430 * rhine_set_vlan_cam - set CAM VLAN filters
1431 * @ioaddr: register block of this Rhine
1432 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1433 * @addr: VLAN ID (2 bytes)
1434 *
1435 * Load addresses into VLAN filters.
1436 */
1437static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1438{
1439	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1440	wmb();
1441
1442	/* Paranoid -- idx out of range should never happen */
1443	idx &= (VCAM_SIZE - 1);
1444
1445	iowrite8((u8) idx, ioaddr + CamAddr);
1446
1447	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1448	udelay(10);
1449	wmb();
1450
1451	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1452	udelay(10);
1453
1454	iowrite8(0, ioaddr + CamCon);
1455}
1456
1457/**
1458 * rhine_set_cam_mask - set multicast CAM mask
1459 * @ioaddr: register block of this Rhine
1460 * @mask: multicast CAM mask
1461 *
1462 * Mask sets multicast filters active/inactive.
1463 */
1464static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1465{
1466	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1467	wmb();
1468
1469	/* write mask */
1470	iowrite32(mask, ioaddr + CamMask);
1471
1472	/* disable CAMEN */
1473	iowrite8(0, ioaddr + CamCon);
1474}
1475
1476/**
1477 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1478 * @ioaddr: register block of this Rhine
1479 * @mask: VLAN CAM mask
1480 *
1481 * Mask sets VLAN filters active/inactive.
1482 */
1483static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1484{
1485	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1486	wmb();
1487
1488	/* write mask */
1489	iowrite32(mask, ioaddr + CamMask);
1490
1491	/* disable CAMEN */
1492	iowrite8(0, ioaddr + CamCon);
1493}
1494
1495/**
1496 * rhine_init_cam_filter - initialize CAM filters
1497 * @dev: network device
1498 *
1499 * Initialize (disable) hardware VLAN and multicast support on this
1500 * Rhine.
1501 */
1502static void rhine_init_cam_filter(struct net_device *dev)
1503{
1504	struct rhine_private *rp = netdev_priv(dev);
1505	void __iomem *ioaddr = rp->base;
1506
1507	/* Disable all CAMs */
1508	rhine_set_vlan_cam_mask(ioaddr, 0);
1509	rhine_set_cam_mask(ioaddr, 0);
1510
1511	/* disable hardware VLAN support */
1512	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1513	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1514}
1515
1516/**
1517 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
1519 *
1520 * Update VLAN CAM filters to match configuration change.
1521 */
1522static void rhine_update_vcam(struct net_device *dev)
1523{
1524	struct rhine_private *rp = netdev_priv(dev);
1525	void __iomem *ioaddr = rp->base;
1526	u16 vid;
1527	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
1528	unsigned int i = 0;
1529
1530	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1531		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1532		vCAMmask |= 1 << i;
1533		if (++i >= VCAM_SIZE)
1534			break;
1535	}
1536	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1537}
1538
1539static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1540{
1541	struct rhine_private *rp = netdev_priv(dev);
1542
1543	spin_lock_bh(&rp->lock);
1544	set_bit(vid, rp->active_vlans);
1545	rhine_update_vcam(dev);
1546	spin_unlock_bh(&rp->lock);
1547	return 0;
1548}
1549
1550static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1551{
1552	struct rhine_private *rp = netdev_priv(dev);
1553
1554	spin_lock_bh(&rp->lock);
1555	clear_bit(vid, rp->active_vlans);
1556	rhine_update_vcam(dev);
1557	spin_unlock_bh(&rp->lock);
1558	return 0;
1559}
1560
1561static void init_registers(struct net_device *dev)
1562{
1563	struct rhine_private *rp = netdev_priv(dev);
1564	void __iomem *ioaddr = rp->base;
1565	int i;
1566
1567	for (i = 0; i < 6; i++)
1568		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1569
1570	/* Initialize other registers. */
1571	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
1572	/* Configure initial FIFO thresholds. */
1573	iowrite8(0x20, ioaddr + TxConfig);
1574	rp->tx_thresh = 0x20;
1575	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */
1576
1577	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1578	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1579
1580	rhine_set_rx_mode(dev);
1581
1582	if (rp->quirks & rqMgmt)
1583		rhine_init_cam_filter(dev);
1584
1585	napi_enable(&rp->napi);
1586
1587	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1588
1589	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1590	       ioaddr + ChipCmd);
1591	rhine_check_media(dev, 1);
1592}
1593
1594/* Enable MII link status auto-polling (required for IntrLinkChange) */
1595static void rhine_enable_linkmon(struct rhine_private *rp)
1596{
1597	void __iomem *ioaddr = rp->base;
1598
1599	iowrite8(0, ioaddr + MIICmd);
1600	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1601	iowrite8(0x80, ioaddr + MIICmd);
1602
1603	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1604
1605	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1606}
1607
1608/* Disable MII link status auto-polling (required for MDIO access) */
1609static void rhine_disable_linkmon(struct rhine_private *rp)
1610{
1611	void __iomem *ioaddr = rp->base;
1612
1613	iowrite8(0, ioaddr + MIICmd);
1614
1615	if (rp->quirks & rqRhineI) {
1616		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
1617
1618		/* Can be called from ISR. Evil. */
1619		mdelay(1);
1620
1621		/* 0x80 must be set immediately before turning it off */
1622		iowrite8(0x80, ioaddr + MIICmd);
1623
1624		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1625
1626		/* Heh. Now clear 0x80 again. */
1627		iowrite8(0, ioaddr + MIICmd);
1628	}
1629	else
1630		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1631}
1632
1633/* Read and write over the MII Management Data I/O (MDIO) interface. */
1634
1635static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1636{
1637	struct rhine_private *rp = netdev_priv(dev);
1638	void __iomem *ioaddr = rp->base;
1639	int result;
1640
1641	rhine_disable_linkmon(rp);
1642
1643	/* rhine_disable_linkmon already cleared MIICmd */
1644	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1645	iowrite8(regnum, ioaddr + MIIRegAddr);
1646	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
1647	rhine_wait_bit_low(rp, MIICmd, 0x40);
1648	result = ioread16(ioaddr + MIIData);
1649
1650	rhine_enable_linkmon(rp);
1651	return result;
1652}
1653
1654static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1655{
1656	struct rhine_private *rp = netdev_priv(dev);
1657	void __iomem *ioaddr = rp->base;
1658
1659	rhine_disable_linkmon(rp);
1660
1661	/* rhine_disable_linkmon already cleared MIICmd */
1662	iowrite8(phy_id, ioaddr + MIIPhyAddr);
1663	iowrite8(regnum, ioaddr + MIIRegAddr);
1664	iowrite16(value, ioaddr + MIIData);
1665	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
1666	rhine_wait_bit_low(rp, MIICmd, 0x20);
1667
1668	rhine_enable_linkmon(rp);
1669}
1670
1671static void rhine_task_disable(struct rhine_private *rp)
1672{
1673	mutex_lock(&rp->task_lock);
1674	rp->task_enable = false;
1675	mutex_unlock(&rp->task_lock);
1676
1677	cancel_work_sync(&rp->slow_event_task);
1678	cancel_work_sync(&rp->reset_task);
1679}
1680
1681static void rhine_task_enable(struct rhine_private *rp)
1682{
1683	mutex_lock(&rp->task_lock);
1684	rp->task_enable = true;
1685	mutex_unlock(&rp->task_lock);
1686}
1687
1688static int rhine_open(struct net_device *dev)
1689{
1690	struct rhine_private *rp = netdev_priv(dev);
1691	void __iomem *ioaddr = rp->base;
1692	int rc;
1693
1694	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1695	if (rc)
1696		goto out;
1697
1698	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1699
1700	rc = alloc_ring(dev);
1701	if (rc < 0)
1702		goto out_free_irq;
1703
1704	rc = alloc_rbufs(dev);
1705	if (rc < 0)
1706		goto out_free_ring;
1707
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
1710	rhine_task_enable(rp);
1711	init_registers(dev);
1712
1713	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1714		  __func__, ioread16(ioaddr + ChipCmd),
1715		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1716
1717	netif_start_queue(dev);
1718
1719out:
1720	return rc;
1721
1722out_free_ring:
1723	free_ring(dev);
1724out_free_irq:
1725	free_irq(rp->irq, dev);
1726	goto out;
1727}
1728
1729static void rhine_reset_task(struct work_struct *work)
1730{
1731	struct rhine_private *rp = container_of(work, struct rhine_private,
1732						reset_task);
1733	struct net_device *dev = rp->dev;
1734
1735	mutex_lock(&rp->task_lock);
1736
1737	if (!rp->task_enable)
1738		goto out_unlock;
1739
1740	napi_disable(&rp->napi);
1741	netif_tx_disable(dev);
1742	spin_lock_bh(&rp->lock);
1743
1744	/* clear all descriptors */
1745	free_tbufs(dev);
1746	alloc_tbufs(dev);
1747
1748	rhine_reset_rbufs(rp);
1749
1750	/* Reinitialize the hardware. */
1751	rhine_chip_reset(dev);
1752	init_registers(dev);
1753
1754	spin_unlock_bh(&rp->lock);
1755
1756	netif_trans_update(dev); /* prevent tx timeout */
1757	dev->stats.tx_errors++;
1758	netif_wake_queue(dev);
1759
1760out_unlock:
1761	mutex_unlock(&rp->task_lock);
1762}
1763
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
1765{
1766	struct rhine_private *rp = netdev_priv(dev);
1767	void __iomem *ioaddr = rp->base;
1768
1769	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1770		    ioread16(ioaddr + IntrStatus),
1771		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1772
1773	schedule_work(&rp->reset_task);
1774}
1775
1776static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1777{
1778	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1779}
1780
1781static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1782				  struct net_device *dev)
1783{
1784	struct rhine_private *rp = netdev_priv(dev);
1785	struct device *hwdev = dev->dev.parent;
1786	void __iomem *ioaddr = rp->base;
1787	unsigned entry;
1788
1789	/* Caution: the write order is important here, set the field
1790	   with the "ownership" bits last. */
1791
1792	/* Calculate the next Tx descriptor entry. */
1793	entry = rp->cur_tx % TX_RING_SIZE;
1794
1795	if (skb_padto(skb, ETH_ZLEN))
1796		return NETDEV_TX_OK;
1797
1798	rp->tx_skbuff[entry] = skb;
1799
1800	if ((rp->quirks & rqRhineI) &&
1801	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1802		/* Must use alignment buffer. */
1803		if (skb->len > PKT_BUF_SZ) {
1804			/* packet too long, drop it */
1805			dev_kfree_skb_any(skb);
1806			rp->tx_skbuff[entry] = NULL;
1807			dev->stats.tx_dropped++;
1808			return NETDEV_TX_OK;
1809		}
1810
1811		/* Padding is not copied and so must be redone. */
1812		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1813		if (skb->len < ETH_ZLEN)
1814			memset(rp->tx_buf[entry] + skb->len, 0,
1815			       ETH_ZLEN - skb->len);
1816		rp->tx_skbuff_dma[entry] = 0;
1817		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1818						      (rp->tx_buf[entry] -
1819						       rp->tx_bufs));
1820	} else {
1821		rp->tx_skbuff_dma[entry] =
1822			dma_map_single(hwdev, skb->data, skb->len,
1823				       DMA_TO_DEVICE);
1824		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1825			dev_kfree_skb_any(skb);
1826			rp->tx_skbuff_dma[entry] = 0;
1827			dev->stats.tx_dropped++;
1828			return NETDEV_TX_OK;
1829		}
1830		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1831	}
1832
1833	rp->tx_ring[entry].desc_length =
1834		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1835
1836	if (unlikely(skb_vlan_tag_present(skb))) {
1837		u16 vid_pcp = skb_vlan_tag_get(skb);
1838
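		/* e.g. TCI 0x6123 (PCP 3, DEI 0, VID 0x123) becomes 0x3123:
		 * VID keeps bits 11:0, PCP shifts down from 15:13 to 14:12. */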
1839		/* drop CFI/DEI bit, register needs VID and PCP */
1840		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1841			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1842		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1843		/* request tagging */
1844		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1845	}
1846	else
1847		rp->tx_ring[entry].tx_status = 0;
1848
1849	netdev_sent_queue(dev, skb->len);
1850	/* lock eth irq */
1851	dma_wmb();
1852	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1853	wmb();
1854
1855	rp->cur_tx++;
	/*
	 * Nobody wants the cur_tx write to rot for ages after the NIC has
	 * seen the transmit request, especially as the transmit completion
	 * handler could miss it.
	 */
1861	smp_wmb();
1862
1863	/* Non-x86 Todo: explicitly flush cache lines here. */
1864
1865	if (skb_vlan_tag_present(skb))
1866		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1867		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1868
1869	/* Wake the potentially-idle transmit channel */
1870	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1871	       ioaddr + ChipCmd1);
1872	IOSYNC;
1873
1874	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
1875	if (rhine_tx_queue_full(rp)) {
1876		netif_stop_queue(dev);
1877		smp_rmb();
1878		/* Rejuvenate. */
1879		if (!rhine_tx_queue_full(rp))
1880			netif_wake_queue(dev);
1881	}
1882
1883	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1884		  rp->cur_tx - 1, entry);
1885
1886	return NETDEV_TX_OK;
1887}
1888
1889static void rhine_irq_disable(struct rhine_private *rp)
1890{
1891	iowrite16(0x0000, rp->base + IntrEnable);
1892}
1893
1894/* The interrupt handler does all of the Rx thread work and cleans up
1895   after the Tx thread. */
1896static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1897{
1898	struct net_device *dev = dev_instance;
1899	struct rhine_private *rp = netdev_priv(dev);
1900	u32 status;
1901	int handled = 0;
1902
1903	status = rhine_get_events(rp);
1904
1905	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1906
1907	if (status & RHINE_EVENT) {
1908		handled = 1;
1909
1910		rhine_irq_disable(rp);
1911		napi_schedule(&rp->napi);
1912	}
1913
1914	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1915		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1916			  status);
1917	}
1918
1919	return IRQ_RETVAL(handled);
1920}
1921
1922/* This routine is logically part of the interrupt handler, but isolated
1923   for clarity. */
1924static void rhine_tx(struct net_device *dev)
1925{
1926	struct rhine_private *rp = netdev_priv(dev);
1927	struct device *hwdev = dev->dev.parent;
1928	unsigned int pkts_compl = 0, bytes_compl = 0;
1929	unsigned int dirty_tx = rp->dirty_tx;
1930	unsigned int cur_tx;
1931	struct sk_buff *skb;
1932
1933	/*
1934	 * The race with rhine_start_tx does not matter here as long as the
1935	 * driver enforces a value of cur_tx that was relevant when the
1936	 * packet was scheduled to the network chipset.
1937	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
1938	 */
1939	smp_rmb();
1940	cur_tx = rp->cur_tx;
1941	/* find and cleanup dirty tx descriptors */
1942	while (dirty_tx != cur_tx) {
1943		unsigned int entry = dirty_tx % TX_RING_SIZE;
1944		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1945
1946		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1947			  entry, txstatus);
1948		if (txstatus & DescOwn)
1949			break;
1950		skb = rp->tx_skbuff[entry];
1951		if (txstatus & 0x8000) {
1952			netif_dbg(rp, tx_done, dev,
1953				  "Transmit error, Tx status %08x\n", txstatus);
1954			dev->stats.tx_errors++;
1955			if (txstatus & 0x0400)
1956				dev->stats.tx_carrier_errors++;
1957			if (txstatus & 0x0200)
1958				dev->stats.tx_window_errors++;
1959			if (txstatus & 0x0100)
1960				dev->stats.tx_aborted_errors++;
1961			if (txstatus & 0x0080)
1962				dev->stats.tx_heartbeat_errors++;
1963			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1964			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
1965				dev->stats.tx_fifo_errors++;
1966				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1967				break; /* Keep the skb - we try again */
1968			}
1969			/* Transmitter restarted in 'abnormal' handler. */
1970		} else {
1971			if (rp->quirks & rqRhineI)
1972				dev->stats.collisions += (txstatus >> 3) & 0x0F;
1973			else
1974				dev->stats.collisions += txstatus & 0x0F;
1975			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1976				  (txstatus >> 3) & 0xF, txstatus & 0xF);
1977
1978			u64_stats_update_begin(&rp->tx_stats.syncp);
1979			rp->tx_stats.bytes += skb->len;
1980			rp->tx_stats.packets++;
1981			u64_stats_update_end(&rp->tx_stats.syncp);
1982		}
1983		/* Free the original skb. */
1984		if (rp->tx_skbuff_dma[entry]) {
1985			dma_unmap_single(hwdev,
1986					 rp->tx_skbuff_dma[entry],
1987					 skb->len,
1988					 DMA_TO_DEVICE);
1989		}
1990		bytes_compl += skb->len;
1991		pkts_compl++;
1992		dev_consume_skb_any(skb);
1993		rp->tx_skbuff[entry] = NULL;
1994		dirty_tx++;
1995	}
1996
1997	rp->dirty_tx = dirty_tx;
1998	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
1999	smp_wmb();
2000
2001	netdev_completed_queue(dev, pkts_compl, bytes_compl);
2002
2003	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
2004	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
2005		netif_wake_queue(dev);
2006		smp_rmb();
2007		/* Rejuvenate. */
2008		if (rhine_tx_queue_full(rp))
2009			netif_stop_queue(dev);
2010	}
2011}
2012
2013/**
2014 * rhine_get_vlan_tci - extract TCI from Rx data buffer
2015 * @skb: pointer to sk_buff
2016 * @data_size: used data area of the buffer including CRC
2017 *
2018 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
2019 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
2020 * aligned following the CRC.
2021 */
2022static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2023{
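	/* Round the used data area up to a 4-byte boundary, then skip the
	 * 2-byte TPID: the TCI sits in the last two bytes of the appended
	 * 802.1Q header. */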
2024	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2025	return be16_to_cpup((__be16 *)trailer);
2026}
2027
2028static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2029				     int data_size)
2030{
2031	dma_rmb();
2032	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2033		u16 vlan_tci;
2034
2035		vlan_tci = rhine_get_vlan_tci(skb, data_size);
2036		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2037	}
2038}
2039
2040/* Process up to limit frames from receive ring */
2041static int rhine_rx(struct net_device *dev, int limit)
2042{
2043	struct rhine_private *rp = netdev_priv(dev);
2044	struct device *hwdev = dev->dev.parent;
2045	int entry = rp->cur_rx % RX_RING_SIZE;
2046	int count;
2047
2048	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2049		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2050
2051	/* If EOP is set on the next entry, it's a new packet. Send it up. */
2052	for (count = 0; count < limit; ++count) {
2053		struct rx_desc *desc = rp->rx_ring + entry;
2054		u32 desc_status = le32_to_cpu(desc->rx_status);
2055		int data_size = desc_status >> 16;
2056
2057		if (desc_status & DescOwn)
2058			break;
2059
2060		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2061			  desc_status);
2062
2063		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2064			if ((desc_status & RxWholePkt) != RxWholePkt) {
2065				netdev_warn(dev,
2066	"Oversized Ethernet frame spanned multiple buffers, "
2067	"entry %#x length %d status %08x!\n",
2068					    entry, data_size,
2069					    desc_status);
2070				dev->stats.rx_length_errors++;
2071			} else if (desc_status & RxErr) {
2072				/* There was a error. */
2073				netif_dbg(rp, rx_err, dev,
2074					  "%s() Rx error %08x\n", __func__,
2075					  desc_status);
2076				dev->stats.rx_errors++;
2077				if (desc_status & 0x0030)
2078					dev->stats.rx_length_errors++;
2079				if (desc_status & 0x0048)
2080					dev->stats.rx_fifo_errors++;
2081				if (desc_status & 0x0004)
2082					dev->stats.rx_frame_errors++;
2083				if (desc_status & 0x0002) {
2084					/* this can also be updated outside the interrupt handler */
2085					spin_lock(&rp->lock);
2086					dev->stats.rx_crc_errors++;
2087					spin_unlock(&rp->lock);
2088				}
2089			}
2090		} else {
2091			/* Length should omit the CRC */
2092			int pkt_len = data_size - 4;
2093			struct sk_buff *skb;
2094
2095			/* Check if the packet is long enough to accept without
2096			   copying to a minimally-sized skbuff. */
2097			if (pkt_len < rx_copybreak) {
2098				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2099				if (unlikely(!skb))
2100					goto drop;
2101
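				/* Keep the original buffer mapped for the
				 * NIC; just sync it around the CPU copy. */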
2102				dma_sync_single_for_cpu(hwdev,
2103							rp->rx_skbuff_dma[entry],
2104							rp->rx_buf_sz,
2105							DMA_FROM_DEVICE);
2106
2107				skb_copy_to_linear_data(skb,
2108						 rp->rx_skbuff[entry]->data,
2109						 pkt_len);
2110
2111				dma_sync_single_for_device(hwdev,
2112							   rp->rx_skbuff_dma[entry],
2113							   rp->rx_buf_sz,
2114							   DMA_FROM_DEVICE);
2115			} else {
2116				struct rhine_skb_dma sd;
2117
2118				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2119					goto drop;
2120
2121				skb = rp->rx_skbuff[entry];
2122
2123				dma_unmap_single(hwdev,
2124						 rp->rx_skbuff_dma[entry],
2125						 rp->rx_buf_sz,
2126						 DMA_FROM_DEVICE);
2127				rhine_skb_dma_nic_store(rp, &sd, entry);
2128			}
2129
2130			skb_put(skb, pkt_len);
2131
2132			rhine_rx_vlan_tag(skb, desc, data_size);
2133
2134			skb->protocol = eth_type_trans(skb, dev);
2135
2136			netif_receive_skb(skb);
2137
2138			u64_stats_update_begin(&rp->rx_stats.syncp);
2139			rp->rx_stats.bytes += pkt_len;
2140			rp->rx_stats.packets++;
2141			u64_stats_update_end(&rp->rx_stats.syncp);
2142		}
2143give_descriptor_to_nic:
2144		desc->rx_status = cpu_to_le32(DescOwn);
2145		entry = (++rp->cur_rx) % RX_RING_SIZE;
2146	}
2147
2148	return count;
2149
2150drop:
2151	dev->stats.rx_dropped++;
2152	goto give_descriptor_to_nic;
2153}
2154
static void rhine_restart_tx(struct net_device *dev)
{
2156	struct rhine_private *rp = netdev_priv(dev);
2157	void __iomem *ioaddr = rp->base;
2158	int entry = rp->dirty_tx % TX_RING_SIZE;
2159	u32 intr_status;
2160
	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here real soon anyway.
	 */
2165	intr_status = rhine_get_events(rp);
2166
2167	if ((intr_status & IntrTxErrSummary) == 0) {
2168
2169		/* We know better than the chip where it should continue. */
2170		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2171		       ioaddr + TxRingPtr);
2172
2173		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2174		       ioaddr + ChipCmd);
2175
2176		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2177			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
2178			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2179
2180		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2181		       ioaddr + ChipCmd1);
2182		IOSYNC;
2183	}
2184	else {
2185		/* This should never happen */
2186		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2187			   intr_status);
2188	}
2189
2190}
2191
2192static void rhine_slow_event_task(struct work_struct *work)
2193{
2194	struct rhine_private *rp =
2195		container_of(work, struct rhine_private, slow_event_task);
2196	struct net_device *dev = rp->dev;
2197	u32 intr_status;
2198
2199	mutex_lock(&rp->task_lock);
2200
2201	if (!rp->task_enable)
2202		goto out_unlock;
2203
2204	intr_status = rhine_get_events(rp);
2205	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2206
2207	if (intr_status & IntrLinkChange)
2208		rhine_check_media(dev, 0);
2209
2210	if (intr_status & IntrPCIErr)
2211		netif_warn(rp, hw, dev, "PCI error\n");
2212
2213	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2214
2215out_unlock:
2216	mutex_unlock(&rp->task_lock);
2217}
2218
2219static void
2220rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2221{
2222	struct rhine_private *rp = netdev_priv(dev);
2223	unsigned int start;
2224
2225	spin_lock_bh(&rp->lock);
2226	rhine_update_rx_crc_and_missed_errord(rp);
2227	spin_unlock_bh(&rp->lock);
2228
2229	netdev_stats_to_stats64(stats, &dev->stats);
2230
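	/* Snapshot the 64-bit counters; retry if a softirq writer updated
	 * them mid-read. */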
	do {
		start = u64_stats_fetch_begin(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry(&rp->tx_stats.syncp, start));
2242}
2243
2244static void rhine_set_rx_mode(struct net_device *dev)
2245{
2246	struct rhine_private *rp = netdev_priv(dev);
2247	void __iomem *ioaddr = rp->base;
2248	u32 mc_filter[2];	/* Multicast hash filter */
2249	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
2250	struct netdev_hw_addr *ha;
2251
2252	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
2253		rx_mode = 0x1C;
2254		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2255		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2256	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2257		   (dev->flags & IFF_ALLMULTI)) {
2258		/* Too many to match, or accept all multicasts. */
2259		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2260		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2261	} else if (rp->quirks & rqMgmt) {
2262		int i = 0;
2263		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
2264		netdev_for_each_mc_addr(ha, dev) {
2265			if (i == MCAM_SIZE)
2266				break;
2267			rhine_set_cam(ioaddr, i, ha->addr);
2268			mCAMmask |= 1 << i;
2269			i++;
2270		}
2271		rhine_set_cam_mask(ioaddr, mCAMmask);
2272	} else {
2273		memset(mc_filter, 0, sizeof(mc_filter));
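		/* Older Rhines: hash each address into the 64-bit multicast
		 * filter using the top six bits of its Ethernet CRC. */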
2274		netdev_for_each_mc_addr(ha, dev) {
2275			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2276
2277			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2278		}
2279		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2280		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2281	}
2282	/* enable/disable VLAN receive filtering */
2283	if (rp->quirks & rqMgmt) {
2284		if (dev->flags & IFF_PROMISC)
2285			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2286		else
2287			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2288	}
2289	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2290}
2291
2292static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2293{
2294	struct device *hwdev = dev->dev.parent;
2295
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2299}
2300
2301static int netdev_get_link_ksettings(struct net_device *dev,
2302				     struct ethtool_link_ksettings *cmd)
2303{
2304	struct rhine_private *rp = netdev_priv(dev);
2305
2306	mutex_lock(&rp->task_lock);
2307	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
2308	mutex_unlock(&rp->task_lock);
2309
2310	return 0;
2311}
2312
2313static int netdev_set_link_ksettings(struct net_device *dev,
2314				     const struct ethtool_link_ksettings *cmd)
2315{
2316	struct rhine_private *rp = netdev_priv(dev);
2317	int rc;
2318
2319	mutex_lock(&rp->task_lock);
2320	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
2321	rhine_set_carrier(&rp->mii_if);
2322	mutex_unlock(&rp->task_lock);
2323
2324	return rc;
2325}
2326
2327static int netdev_nway_reset(struct net_device *dev)
2328{
2329	struct rhine_private *rp = netdev_priv(dev);
2330
2331	return mii_nway_restart(&rp->mii_if);
2332}
2333
2334static u32 netdev_get_link(struct net_device *dev)
2335{
2336	struct rhine_private *rp = netdev_priv(dev);
2337
2338	return mii_link_ok(&rp->mii_if);
2339}
2340
2341static u32 netdev_get_msglevel(struct net_device *dev)
2342{
2343	struct rhine_private *rp = netdev_priv(dev);
2344
2345	return rp->msg_enable;
2346}
2347
2348static void netdev_set_msglevel(struct net_device *dev, u32 value)
2349{
2350	struct rhine_private *rp = netdev_priv(dev);
2351
2352	rp->msg_enable = value;
2353}
2354
2355static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2356{
2357	struct rhine_private *rp = netdev_priv(dev);
2358
2359	if (!(rp->quirks & rqWOL))
2360		return;
2361
2362	spin_lock_irq(&rp->lock);
2363	wol->supported = WAKE_PHY | WAKE_MAGIC |
2364			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2365	wol->wolopts = rp->wolopts;
2366	spin_unlock_irq(&rp->lock);
2367}
2368
2369static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2370{
2371	struct rhine_private *rp = netdev_priv(dev);
2372	u32 support = WAKE_PHY | WAKE_MAGIC |
2373		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
2374
2375	if (!(rp->quirks & rqWOL))
2376		return -EINVAL;
2377
2378	if (wol->wolopts & ~support)
2379		return -EINVAL;
2380
2381	spin_lock_irq(&rp->lock);
2382	rp->wolopts = wol->wolopts;
2383	spin_unlock_irq(&rp->lock);
2384
2385	return 0;
2386}
2387
2388static const struct ethtool_ops netdev_ethtool_ops = {
2389	.get_drvinfo		= netdev_get_drvinfo,
2390	.nway_reset		= netdev_nway_reset,
2391	.get_link		= netdev_get_link,
2392	.get_msglevel		= netdev_get_msglevel,
2393	.set_msglevel		= netdev_set_msglevel,
2394	.get_wol		= rhine_get_wol,
2395	.set_wol		= rhine_set_wol,
2396	.get_link_ksettings	= netdev_get_link_ksettings,
2397	.set_link_ksettings	= netdev_set_link_ksettings,
2398};
2399
2400static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2401{
2402	struct rhine_private *rp = netdev_priv(dev);
2403	int rc;
2404
2405	if (!netif_running(dev))
2406		return -EINVAL;
2407
2408	mutex_lock(&rp->task_lock);
2409	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2410	rhine_set_carrier(&rp->mii_if);
2411	mutex_unlock(&rp->task_lock);
2412
2413	return rc;
2414}
2415
2416static int rhine_close(struct net_device *dev)
2417{
2418	struct rhine_private *rp = netdev_priv(dev);
2419	void __iomem *ioaddr = rp->base;
2420
2421	rhine_task_disable(rp);
2422	napi_disable(&rp->napi);
2423	netif_stop_queue(dev);
2424
2425	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2426		  ioread16(ioaddr + ChipCmd));
2427
2428	/* Switch to loopback mode to avoid hardware races. */
2429	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2430
2431	rhine_irq_disable(rp);
2432
2433	/* Stop the chip's Tx and Rx processes. */
2434	iowrite16(CmdStop, ioaddr + ChipCmd);
2435
2436	free_irq(rp->irq, dev);
2437	free_rbufs(dev);
2438	free_tbufs(dev);
2439	free_ring(dev);
2440
2441	return 0;
2442}
2443
2444
2445static void rhine_remove_one_pci(struct pci_dev *pdev)
2446{
2447	struct net_device *dev = pci_get_drvdata(pdev);
2448	struct rhine_private *rp = netdev_priv(dev);
2449
2450	unregister_netdev(dev);
2451
2452	pci_iounmap(pdev, rp->base);
2453	pci_release_regions(pdev);
2454
2455	free_netdev(dev);
2456	pci_disable_device(pdev);
2457}
2458
static void rhine_remove_one_platform(struct platform_device *pdev)
2460{
2461	struct net_device *dev = platform_get_drvdata(pdev);
2462	struct rhine_private *rp = netdev_priv(dev);
2463
2464	unregister_netdev(dev);
2465
2466	iounmap(rp->base);
2467
2468	free_netdev(dev);
2471}
2472
2473static void rhine_shutdown_pci(struct pci_dev *pdev)
2474{
2475	struct net_device *dev = pci_get_drvdata(pdev);
2476	struct rhine_private *rp = netdev_priv(dev);
2477	void __iomem *ioaddr = rp->base;
2478
2479	if (!(rp->quirks & rqWOL))
2480		return; /* Nothing to do for non-WOL adapters */
2481
2482	rhine_power_init(dev);
2483
2484	/* Make sure we use pattern 0, 1 and not 4, 5 */
2485	if (rp->quirks & rq6patterns)
2486		iowrite8(0x04, ioaddr + WOLcgClr);
2487
2488	spin_lock(&rp->lock);
2489
2490	if (rp->wolopts & WAKE_MAGIC) {
2491		iowrite8(WOLmagic, ioaddr + WOLcrSet);
2492		/*
2493		 * Turn EEPROM-controlled wake-up back on -- some hardware may
2494		 * not cooperate otherwise.
2495		 */
2496		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2497	}
2498
2499	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2500		iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2501
2502	if (rp->wolopts & WAKE_PHY)
2503		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2504
2505	if (rp->wolopts & WAKE_UCAST)
2506		iowrite8(WOLucast, ioaddr + WOLcrSet);
2507
2508	if (rp->wolopts) {
2509		/* Enable legacy WOL (for old motherboards) */
2510		iowrite8(0x01, ioaddr + PwcfgSet);
2511		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2512	}
2513
2514	spin_unlock(&rp->lock);
2515
2516	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2517		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2518
2519		pci_wake_from_d3(pdev, true);
2520		pci_set_power_state(pdev, PCI_D3hot);
2521	}
2522}
2523
2524#ifdef CONFIG_PM_SLEEP
2525static int rhine_suspend(struct device *device)
2526{
2527	struct net_device *dev = dev_get_drvdata(device);
2528	struct rhine_private *rp = netdev_priv(dev);
2529
2530	if (!netif_running(dev))
2531		return 0;
2532
2533	rhine_task_disable(rp);
2534	rhine_irq_disable(rp);
2535	napi_disable(&rp->napi);
2536
2537	netif_device_detach(dev);
2538
2539	if (dev_is_pci(device))
2540		rhine_shutdown_pci(to_pci_dev(device));
2541
2542	return 0;
2543}
2544
2545static int rhine_resume(struct device *device)
2546{
2547	struct net_device *dev = dev_get_drvdata(device);
2548	struct rhine_private *rp = netdev_priv(dev);
2549
2550	if (!netif_running(dev))
2551		return 0;
2552
2553	enable_mmio(rp->pioaddr, rp->quirks);
2554	rhine_power_init(dev);
2555	free_tbufs(dev);
2556	alloc_tbufs(dev);
2557	rhine_reset_rbufs(rp);
2558	rhine_task_enable(rp);
2559	spin_lock_bh(&rp->lock);
2560	init_registers(dev);
2561	spin_unlock_bh(&rp->lock);
2562
2563	netif_device_attach(dev);
2564
2565	return 0;
2566}
2567
2568static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2569#define RHINE_PM_OPS	(&rhine_pm_ops)
2570
2571#else
2572
2573#define RHINE_PM_OPS	NULL
2574
2575#endif /* !CONFIG_PM_SLEEP */
2576
2577static struct pci_driver rhine_driver_pci = {
2578	.name		= DRV_NAME,
2579	.id_table	= rhine_pci_tbl,
2580	.probe		= rhine_init_one_pci,
2581	.remove		= rhine_remove_one_pci,
2582	.shutdown	= rhine_shutdown_pci,
2583	.driver.pm	= RHINE_PM_OPS,
2584};
2585
2586static struct platform_driver rhine_driver_platform = {
2587	.probe		= rhine_init_one_platform,
2588	.remove		= rhine_remove_one_platform,
2589	.driver = {
2590		.name	= DRV_NAME,
2591		.of_match_table	= rhine_of_tbl,
2592		.pm		= RHINE_PM_OPS,
2593	}
2594};
2595
2596static const struct dmi_system_id rhine_dmi_table[] __initconst = {
2597	{
2598		.ident = "EPIA-M",
2599		.matches = {
2600			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2601			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2602		},
2603	},
2604	{
2605		.ident = "KV7",
2606		.matches = {
2607			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2608			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2609		},
2610	},
2611	{ NULL }
2612};
2613
2614static int __init rhine_init(void)
2615{
2616	int ret_pci, ret_platform;
2617
2622	if (dmi_check_system(rhine_dmi_table)) {
2623		/* these BIOSes fail at PXE boot if chip is in D3 */
2624		avoid_D3 = true;
2625		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2626	}
2627	else if (avoid_D3)
2628		pr_info("avoid_D3 set\n");
2629
2630	ret_pci = pci_register_driver(&rhine_driver_pci);
2631	ret_platform = platform_driver_register(&rhine_driver_platform);
2632	if ((ret_pci < 0) && (ret_platform < 0))
2633		return ret_pci;
2634
2635	return 0;
2636}
2637
2638
2639static void __exit rhine_cleanup(void)
2640{
2641	platform_driver_unregister(&rhine_driver_platform);
2642	pci_unregister_driver(&rhine_driver_pci);
2643}
2644
2645
2646module_init(rhine_init);
2647module_exit(rhine_cleanup);