   1/*  Silan SC92031 PCI Fast Ethernet Adapter driver
   2 *
   3 *  Based on vendor drivers:
   4 *  Silan Fast Ethernet Netcard Driver:
   5 *    MODULE_AUTHOR ("gaoyonghong");
   6 *    MODULE_DESCRIPTION ("SILAN Fast Ethernet driver");
   7 *    MODULE_LICENSE("GPL");
   8 *  8139D Fast Ethernet driver:
   9 *    (C) 2002 by gaoyonghong
  10 *    MODULE_AUTHOR ("gaoyonghong");
  11 *    MODULE_DESCRIPTION ("Rsltek 8139D PCI Fast Ethernet Adapter driver");
  12 *    MODULE_LICENSE("GPL");
  13 *  Both are almost identical and seem to be based on pci-skeleton.c
  14 *
  15 *  Rewritten for 2.6 by Cesar Eduardo Barros
  16 *
  17 *  A datasheet for this chip can be found at
  18 *  http://www.silan.com.cn/english/product/pdf/SC92031AY.pdf 
  19 */
  20
  21/* Note about set_mac_address: I don't know how to change the hardware
  22 * matching, so you need to enable IFF_PROMISC when using it.
  23 */
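/* Editorial illustration (these commands are not part of the driver): with
 * iproute2, changing the address and then enabling promiscuous mode as the
 * note above requires could look like this ("eth0" and the address are
 * placeholders):
 *
 *   ip link set dev eth0 address 02:11:22:33:44:55
 *   ip link set dev eth0 promisc on
 */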
  24
  25#include <linux/interrupt.h>
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/delay.h>
  29#include <linux/pci.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/netdevice.h>
  32#include <linux/etherdevice.h>
  33#include <linux/ethtool.h>
  34#include <linux/crc32.h>
  35
  36#include <asm/irq.h>
  37
  38#define SC92031_NAME "sc92031"
  39
  40/* BAR 0 is MMIO, BAR 1 is PIO */
  41#ifndef SC92031_USE_BAR
  42#define SC92031_USE_BAR 0
  43#endif
  44
  45/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
  46static int multicast_filter_limit = 64;
  47module_param(multicast_filter_limit, int, 0);
  48MODULE_PARM_DESC(multicast_filter_limit,
  49	"Maximum number of filtered multicast addresses");
  50
  51static int media;
  52module_param(media, int, 0);
  53MODULE_PARM_DESC(media, "Media type (0x00 = autodetect,"
  54	" 0x01 = 10M half, 0x02 = 10M full,"
  55	" 0x04 = 100M half, 0x08 = 100M full)");
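/* Usage example (editorial note, not from the original source): both module
 * parameters can be given at load time, e.g. to force 100M full duplex and
 * allow 32 filtered multicast addresses:
 *
 *   modprobe sc92031 media=0x08 multicast_filter_limit=32
 */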
  56
  57/* Size of the in-memory receive ring. */
   58#define  RX_BUF_LEN_IDX  3 /* 0==8K, 1==16K, 2==32K, 3==64K, 4==128K */
  59#define  RX_BUF_LEN	(8192 << RX_BUF_LEN_IDX)
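/* Worked example (editorial note): with RX_BUF_LEN_IDX == 3 as above,
 * RX_BUF_LEN = 8192 << 3 = 65536 bytes, i.e. the 64K ring size that
 * _sc92031_reset() later programs into Config1 as Cfg1_Rcv64K.
 */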
  60
  61/* Number of Tx descriptor registers. */
  62#define  NUM_TX_DESC	   4
  63
  64/* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
  65#define  MAX_ETH_FRAME_SIZE	  1536
  66
  67/* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
  68#define  TX_BUF_SIZE       MAX_ETH_FRAME_SIZE
  69#define  TX_BUF_TOT_LEN    (TX_BUF_SIZE * NUM_TX_DESC)
  70
  71/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
  72#define  RX_FIFO_THRESH    7     /* Rx buffer level before first PCI xfer.  */
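/* Worked example (editorial note): a setting n corresponds to 2^(n+4) bytes,
 * so 0 means 16 bytes and 6 means 1024 bytes; the value 7 chosen here is the
 * special "end of packet" setting, i.e. the whole frame is buffered before
 * the first PCI transfer.
 */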
  73
  74/* Time in jiffies before concluding the transmitter is hung. */
  75#define  TX_TIMEOUT     (4*HZ)
  76
  77#define  SILAN_STATS_NUM    2    /* number of ETHTOOL_GSTATS */
  78
  79/* media options */
  80#define  AUTOSELECT    0x00
  81#define  M10_HALF      0x01
  82#define  M10_FULL      0x02
  83#define  M100_HALF     0x04
  84#define  M100_FULL     0x08
  85
  86 /* Symbolic offsets to registers. */
  87enum  silan_registers {
  88   Config0    = 0x00,         // Config0
  89   Config1    = 0x04,         // Config1
   90   RxBufWPtr  = 0x08,         // Rx buffer write pointer
  91   IntrStatus = 0x0C,         // Interrupt status
  92   IntrMask   = 0x10,         // Interrupt mask
  93   RxbufAddr  = 0x14,         // Rx buffer start address
  94   RxBufRPtr  = 0x18,         // Rx buffer read pointer
  95   Txstatusall = 0x1C,        // Transmit status of all descriptors
  96   TxStatus0  = 0x20,	      // Transmit status (Four 32bit registers).
  97   TxAddr0    = 0x30,         // Tx descriptors (also four 32bit).
  98   RxConfig   = 0x40,         // Rx configuration
  99   MAC0	      = 0x44,	      // Ethernet hardware address.
 100   MAR0	      = 0x4C,	      // Multicast filter.
 101   RxStatus0  = 0x54,         // Rx status
 102   TxConfig   = 0x5C,         // Tx configuration
  103   PhyCtrl    = 0x60,         // PHY control
 104   FlowCtrlConfig = 0x64,     // flow control
 105   Miicmd0    = 0x68,         // Mii command0 register
 106   Miicmd1    = 0x6C,         // Mii command1 register
 107   Miistatus  = 0x70,         // Mii status register
 108   Timercnt   = 0x74,         // Timer counter register
 109   TimerIntr  = 0x78,         // Timer interrupt register
 110   PMConfig   = 0x7C,         // Power Manager configuration
  111   CRC0       = 0x80,         // Power Manager CRC (Two 32bit registers)
  112   Wakeup0    = 0x88,         // Power Manager wakeup (Eight 64bit registers)
  113   LSBCRC0    = 0xC8,         // Power Manager LSBCRC (Two 32bit registers)
 114   TestD0     = 0xD0,
 115   TestD4     = 0xD4,
 116   TestD8     = 0xD8,
 117};
 118
 119#define MII_BMCR            0        // Basic mode control register
 120#define MII_BMSR            1        // Basic mode status register
 121#define MII_JAB             16
 122#define MII_OutputStatus    24
 123
 124#define BMCR_FULLDPLX       0x0100    // Full duplex
 125#define BMCR_ANRESTART      0x0200    // Auto negotiation restart
 126#define BMCR_ANENABLE       0x1000    // Enable auto negotiation
 127#define BMCR_SPEED100       0x2000    // Select 100Mbps
 128#define BMSR_LSTATUS        0x0004    // Link status
 129#define PHY_16_JAB_ENB      0x1000
 130#define PHY_16_PORT_ENB     0x1
 131
 132enum IntrStatusBits {
 133   LinkFail       = 0x80000000,
 134   LinkOK         = 0x40000000,
 135   TimeOut        = 0x20000000,
 136   RxOverflow     = 0x0040,
 137   RxOK           = 0x0020,
 138   TxOK           = 0x0001,
 139   IntrBits = LinkFail|LinkOK|TimeOut|RxOverflow|RxOK|TxOK,
 140};
 141
 142enum TxStatusBits {
 143   TxCarrierLost = 0x20000000,
 144   TxAborted     = 0x10000000,
 145   TxOutOfWindow = 0x08000000,
 146   TxNccShift    = 22,
 147   EarlyTxThresShift = 16,
 148   TxStatOK      = 0x8000,
 149   TxUnderrun    = 0x4000,
 150   TxOwn         = 0x2000,
 151};
 152
 153enum RxStatusBits {
 154   RxStatesOK   = 0x80000,
 155   RxBadAlign   = 0x40000,
 156   RxHugeFrame  = 0x20000,
 157   RxSmallFrame = 0x10000,
 158   RxCRCOK      = 0x8000,
 159   RxCrlFrame   = 0x4000,
 160   Rx_Broadcast = 0x2000,
 161   Rx_Multicast = 0x1000,
 162   RxAddrMatch  = 0x0800,
 163   MiiErr       = 0x0400,
 164};
 165
 166enum RxConfigBits {
 167   RxFullDx    = 0x80000000,
 168   RxEnb       = 0x40000000,
 169   RxSmall     = 0x20000000,
 170   RxHuge      = 0x10000000,
 171   RxErr       = 0x08000000,
 172   RxAllphys   = 0x04000000,
 173   RxMulticast = 0x02000000,
 174   RxBroadcast = 0x01000000,
 175   RxLoopBack  = (1 << 23) | (1 << 22),
 176   LowThresholdShift  = 12,
 177   HighThresholdShift = 2,
 178};
 179
 180enum TxConfigBits {
 181   TxFullDx       = 0x80000000,
 182   TxEnb          = 0x40000000,
 183   TxEnbPad       = 0x20000000,
 184   TxEnbHuge      = 0x10000000,
 185   TxEnbFCS       = 0x08000000,
 186   TxNoBackOff    = 0x04000000,
 187   TxEnbPrem      = 0x02000000,
 188   TxCareLostCrs  = 0x1000000,
 189   TxExdCollNum   = 0xf00000,
 190   TxDataRate     = 0x80000,
 191};
 192
 193enum PhyCtrlconfigbits {
 194   PhyCtrlAne         = 0x80000000,
 195   PhyCtrlSpd100      = 0x40000000,
 196   PhyCtrlSpd10       = 0x20000000,
 197   PhyCtrlPhyBaseAddr = 0x1f000000,
 198   PhyCtrlDux         = 0x800000,
 199   PhyCtrlReset       = 0x400000,
 200};
 201
 202enum FlowCtrlConfigBits {
 203   FlowCtrlFullDX = 0x80000000,
 204   FlowCtrlEnb    = 0x40000000,
 205};
 206
 207enum Config0Bits {
 208   Cfg0_Reset  = 0x80000000,
 209   Cfg0_Anaoff = 0x40000000,
 210   Cfg0_LDPS   = 0x20000000,
 211};
 212
 213enum Config1Bits {
 214   Cfg1_EarlyRx = 1 << 31,
 215   Cfg1_EarlyTx = 1 << 30,
 216
 217   //rx buffer size
 218   Cfg1_Rcv8K   = 0x0,
 219   Cfg1_Rcv16K  = 0x1,
 220   Cfg1_Rcv32K  = 0x3,
 221   Cfg1_Rcv64K  = 0x7,
 222   Cfg1_Rcv128K = 0xf,
 223};
 224
 225enum MiiCmd0Bits {
 226   Mii_Divider = 0x20000000,
 227   Mii_WRITE   = 0x400000,
 228   Mii_READ    = 0x200000,
 229   Mii_SCAN    = 0x100000,
 230   Mii_Tamod   = 0x80000,
 231   Mii_Drvmod  = 0x40000,
 232   Mii_mdc     = 0x20000,
 233   Mii_mdoen   = 0x10000,
 234   Mii_mdo     = 0x8000,
 235   Mii_mdi     = 0x4000,
 236};
 237
 238enum MiiStatusBits {
 239    Mii_StatusBusy = 0x80000000,
 240};
 241
 242enum PMConfigBits {
 243   PM_Enable  = 1 << 31,
 244   PM_LongWF  = 1 << 30,
 245   PM_Magic   = 1 << 29,
 246   PM_LANWake = 1 << 28,
 247   PM_LWPTN   = (1 << 27 | 1<< 26),
 248   PM_LinkUp  = 1 << 25,
 249   PM_WakeUp  = 1 << 24,
 250};
 251
 252/* Locking rules:
 253 * priv->lock protects most of the fields of priv and most of the
 254 * hardware registers. It does not have to protect against softirqs
 255 * between sc92031_disable_interrupts and sc92031_enable_interrupts;
 256 * it also does not need to be used in ->open and ->stop while the
 257 * device interrupts are off.
 258 * Not having to protect against softirqs is very useful due to heavy
 259 * use of mdelay() at _sc92031_reset.
 260 * Functions prefixed with _sc92031_ must be called with the lock held;
 261 * functions prefixed with sc92031_ must be called without the lock held.
 262 * Use mmiowb() before unlocking if the hardware was written to.
 263 */
 264
 265/* Locking rules for the interrupt:
 266 * - the interrupt and the tasklet never run at the same time
 267 * - neither run between sc92031_disable_interrupts and
 268 *   sc92031_enable_interrupt
 269 */
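/* A minimal sketch of the locking convention above (editorial illustration
 * only; it mirrors sc92031_set_multicast_list further down in this file):
 *
 *	spin_lock_bh(&priv->lock);
 *	_sc92031_set_mar(dev);		(lock held: note the _sc92031_ prefix)
 *	_sc92031_set_rx_config(dev);
 *	mmiowb();			(hardware registers were written to)
 *	spin_unlock_bh(&priv->lock);
 */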
 270
 271struct sc92031_priv {
 272	spinlock_t		lock;
 273	/* iomap.h cookie */
 274	void __iomem		*port_base;
 275	/* pci device structure */
 276	struct pci_dev		*pdev;
 277	/* tasklet */
 278	struct tasklet_struct	tasklet;
 279
 280	/* CPU address of rx ring */
 281	void			*rx_ring;
 282	/* PCI address of rx ring */
 283	dma_addr_t		rx_ring_dma_addr;
 284	/* PCI address of rx ring read pointer */
 285	dma_addr_t		rx_ring_tail;
 286
 287	/* tx ring write index */
 288	unsigned		tx_head;
 289	/* tx ring read index */
 290	unsigned		tx_tail;
 291	/* CPU address of tx bounce buffer */
 292	void			*tx_bufs;
 293	/* PCI address of tx bounce buffer */
 294	dma_addr_t		tx_bufs_dma_addr;
 295
 296	/* copies of some hardware registers */
 297	u32			intr_status;
 298	atomic_t		intr_mask;
 299	u32			rx_config;
 300	u32			tx_config;
 301	u32			pm_config;
 302
 303	/* copy of some flags from dev->flags */
 304	unsigned int		mc_flags;
 305
 306	/* for ETHTOOL_GSTATS */
 307	u64			tx_timeouts;
 308	u64			rx_loss;
 309
 310	/* for dev->get_stats */
 311	long			rx_value;
 312};
 313
 314/* I don't know which registers can be safely read; however, I can guess
 315 * MAC0 is one of them. */
 316static inline void _sc92031_dummy_read(void __iomem *port_base)
 317{
 318	ioread32(port_base + MAC0);
 319}
 320
 321static u32 _sc92031_mii_wait(void __iomem *port_base)
 322{
 323	u32 mii_status;
 324
 325	do {
 326		udelay(10);
 327		mii_status = ioread32(port_base + Miistatus);
 328	} while (mii_status & Mii_StatusBusy);
 329
 330	return mii_status;
 331}
 332
 333static u32 _sc92031_mii_cmd(void __iomem *port_base, u32 cmd0, u32 cmd1)
 334{
 335	iowrite32(Mii_Divider, port_base + Miicmd0);
 336
 337	_sc92031_mii_wait(port_base);
 338
 339	iowrite32(cmd1, port_base + Miicmd1);
 340	iowrite32(Mii_Divider | cmd0, port_base + Miicmd0);
 341
 342	return _sc92031_mii_wait(port_base);
 343}
 344
 345static void _sc92031_mii_scan(void __iomem *port_base)
 346{
 347	_sc92031_mii_cmd(port_base, Mii_SCAN, 0x1 << 6);
 348}
 349
 350static u16 _sc92031_mii_read(void __iomem *port_base, unsigned reg)
 351{
 352	return _sc92031_mii_cmd(port_base, Mii_READ, reg << 6) >> 13;
 353}
 354
 355static void _sc92031_mii_write(void __iomem *port_base, unsigned reg, u16 val)
 356{
 357	_sc92031_mii_cmd(port_base, Mii_WRITE, (reg << 6) | ((u32)val << 11));
 358}
 359
 360static void sc92031_disable_interrupts(struct net_device *dev)
 361{
 362	struct sc92031_priv *priv = netdev_priv(dev);
 363	void __iomem *port_base = priv->port_base;
 364
 365	/* tell the tasklet/interrupt not to enable interrupts */
 366	atomic_set(&priv->intr_mask, 0);
 367	wmb();
 368
 369	/* stop interrupts */
 370	iowrite32(0, port_base + IntrMask);
 371	_sc92031_dummy_read(port_base);
 372	mmiowb();
 373
 374	/* wait for any concurrent interrupt/tasklet to finish */
 375	synchronize_irq(dev->irq);
 376	tasklet_disable(&priv->tasklet);
 377}
 378
 379static void sc92031_enable_interrupts(struct net_device *dev)
 380{
 381	struct sc92031_priv *priv = netdev_priv(dev);
 382	void __iomem *port_base = priv->port_base;
 383
 384	tasklet_enable(&priv->tasklet);
 385
 386	atomic_set(&priv->intr_mask, IntrBits);
 387	wmb();
 388
 389	iowrite32(IntrBits, port_base + IntrMask);
 390	mmiowb();
 391}
 392
 393static void _sc92031_disable_tx_rx(struct net_device *dev)
 394{
 395	struct sc92031_priv *priv = netdev_priv(dev);
 396	void __iomem *port_base = priv->port_base;
 397
 398	priv->rx_config &= ~RxEnb;
 399	priv->tx_config &= ~TxEnb;
 400	iowrite32(priv->rx_config, port_base + RxConfig);
 401	iowrite32(priv->tx_config, port_base + TxConfig);
 402}
 403
 404static void _sc92031_enable_tx_rx(struct net_device *dev)
 405{
 406	struct sc92031_priv *priv = netdev_priv(dev);
 407	void __iomem *port_base = priv->port_base;
 408
 409	priv->rx_config |= RxEnb;
 410	priv->tx_config |= TxEnb;
 411	iowrite32(priv->rx_config, port_base + RxConfig);
 412	iowrite32(priv->tx_config, port_base + TxConfig);
 413}
 414
 415static void _sc92031_tx_clear(struct net_device *dev)
 416{
 417	struct sc92031_priv *priv = netdev_priv(dev);
 418
 419	while (priv->tx_head - priv->tx_tail > 0) {
 420		priv->tx_tail++;
 421		dev->stats.tx_dropped++;
 422	}
 423	priv->tx_head = priv->tx_tail = 0;
 424}
 425
 426static void _sc92031_set_mar(struct net_device *dev)
 427{
 428	struct sc92031_priv *priv = netdev_priv(dev);
 429	void __iomem *port_base = priv->port_base;
 430	u32 mar0 = 0, mar1 = 0;
 431
 432	if ((dev->flags & IFF_PROMISC) ||
 433	    netdev_mc_count(dev) > multicast_filter_limit ||
 434	    (dev->flags & IFF_ALLMULTI))
 435		mar0 = mar1 = 0xffffffff;
 436	else if (dev->flags & IFF_MULTICAST) {
 437		struct netdev_hw_addr *ha;
 438
 439		netdev_for_each_mc_addr(ha, dev) {
 440			u32 crc;
 441			unsigned bit = 0;
 442
 443			crc = ~ether_crc(ETH_ALEN, ha->addr);
 444			crc >>= 24;
 445
 446			if (crc & 0x01)	bit |= 0x02;
 447			if (crc & 0x02)	bit |= 0x01;
 448			if (crc & 0x10)	bit |= 0x20;
 449			if (crc & 0x20)	bit |= 0x10;
 450			if (crc & 0x40)	bit |= 0x08;
 451			if (crc & 0x80)	bit |= 0x04;
 452
 453			if (bit > 31)
 454				mar0 |= 0x1 << (bit - 32);
 455			else
 456				mar1 |= 0x1 << bit;
 457		}
 458	}
 459
 460	iowrite32(mar0, port_base + MAR0);
 461	iowrite32(mar1, port_base + MAR0 + 4);
 462}
 463
 464static void _sc92031_set_rx_config(struct net_device *dev)
 465{
 466	struct sc92031_priv *priv = netdev_priv(dev);
 467	void __iomem *port_base = priv->port_base;
 468	unsigned int old_mc_flags;
 469	u32 rx_config_bits = 0;
 470
 471	old_mc_flags = priv->mc_flags;
 472
 473	if (dev->flags & IFF_PROMISC)
 474		rx_config_bits |= RxSmall | RxHuge | RxErr | RxBroadcast
 475				| RxMulticast | RxAllphys;
 476
 477	if (dev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
 478		rx_config_bits |= RxMulticast;
 479
 480	if (dev->flags & IFF_BROADCAST)
 481		rx_config_bits |= RxBroadcast;
 482
 483	priv->rx_config &= ~(RxSmall | RxHuge | RxErr | RxBroadcast
 484			| RxMulticast | RxAllphys);
 485	priv->rx_config |= rx_config_bits;
 486
 487	priv->mc_flags = dev->flags & (IFF_PROMISC | IFF_ALLMULTI
 488			| IFF_MULTICAST | IFF_BROADCAST);
 489
 490	if (netif_carrier_ok(dev) && priv->mc_flags != old_mc_flags)
 491		iowrite32(priv->rx_config, port_base + RxConfig);
 492}
 493
 494static bool _sc92031_check_media(struct net_device *dev)
 495{
 496	struct sc92031_priv *priv = netdev_priv(dev);
 497	void __iomem *port_base = priv->port_base;
 498	u16 bmsr;
 499
 500	bmsr = _sc92031_mii_read(port_base, MII_BMSR);
 501	rmb();
 502	if (bmsr & BMSR_LSTATUS) {
 503		bool speed_100, duplex_full;
 504		u32 flow_ctrl_config = 0;
 505		u16 output_status = _sc92031_mii_read(port_base,
 506				MII_OutputStatus);
 507		_sc92031_mii_scan(port_base);
 508
 509		speed_100 = output_status & 0x2;
 510		duplex_full = output_status & 0x4;
 511
 512		/* Initial Tx/Rx configuration */
 513		priv->rx_config = (0x40 << LowThresholdShift) | (0x1c0 << HighThresholdShift);
 514		priv->tx_config = 0x48800000;
 515
 516		/* NOTE: vendor driver had dead code here to enable tx padding */
 517
 518		if (!speed_100)
 519			priv->tx_config |= 0x80000;
 520
 521		// configure rx mode
 522		_sc92031_set_rx_config(dev);
 523
 524		if (duplex_full) {
 525			priv->rx_config |= RxFullDx;
 526			priv->tx_config |= TxFullDx;
 527			flow_ctrl_config = FlowCtrlFullDX | FlowCtrlEnb;
 528		} else {
 529			priv->rx_config &= ~RxFullDx;
 530			priv->tx_config &= ~TxFullDx;
 531		}
 532
 533		_sc92031_set_mar(dev);
 534		_sc92031_set_rx_config(dev);
 535		_sc92031_enable_tx_rx(dev);
 536		iowrite32(flow_ctrl_config, port_base + FlowCtrlConfig);
 537
 538		netif_carrier_on(dev);
 539
 540		if (printk_ratelimit())
 541			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
 542				dev->name,
 543				speed_100 ? "100" : "10",
 544				duplex_full ? "full" : "half");
 545		return true;
 546	} else {
 547		_sc92031_mii_scan(port_base);
 548
 549		netif_carrier_off(dev);
 550
 551		_sc92031_disable_tx_rx(dev);
 552
 553		if (printk_ratelimit())
 554			printk(KERN_INFO "%s: link down\n", dev->name);
 555		return false;
 556	}
 557}
 558
 559static void _sc92031_phy_reset(struct net_device *dev)
 560{
 561	struct sc92031_priv *priv = netdev_priv(dev);
 562	void __iomem *port_base = priv->port_base;
 563	u32 phy_ctrl;
 564
 565	phy_ctrl = ioread32(port_base + PhyCtrl);
 566	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
 567	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;
 568
 569	switch (media) {
 570	default:
 571	case AUTOSELECT:
 572		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
 573		break;
 574	case M10_HALF:
 575		phy_ctrl |= PhyCtrlSpd10;
 576		break;
 577	case M10_FULL:
 578		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
 579		break;
 580	case M100_HALF:
 581		phy_ctrl |= PhyCtrlSpd100;
 582		break;
 583	case M100_FULL:
 584		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
 585		break;
 586	}
 587
 588	iowrite32(phy_ctrl, port_base + PhyCtrl);
 589	mdelay(10);
 590
 591	phy_ctrl &= ~PhyCtrlReset;
 592	iowrite32(phy_ctrl, port_base + PhyCtrl);
 593	mdelay(1);
 594
 595	_sc92031_mii_write(port_base, MII_JAB,
 596			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
 597	_sc92031_mii_scan(port_base);
 598
 599	netif_carrier_off(dev);
 600	netif_stop_queue(dev);
 601}
 602
 603static void _sc92031_reset(struct net_device *dev)
 604{
 605	struct sc92031_priv *priv = netdev_priv(dev);
 606	void __iomem *port_base = priv->port_base;
 607
 608	/* disable PM */
 609	iowrite32(0, port_base + PMConfig);
 610
 611	/* soft reset the chip */
 612	iowrite32(Cfg0_Reset, port_base + Config0);
 613	mdelay(200);
 614
 615	iowrite32(0, port_base + Config0);
 616	mdelay(10);
 617
 618	/* disable interrupts */
 619	iowrite32(0, port_base + IntrMask);
 620
 621	/* clear multicast address */
 622	iowrite32(0, port_base + MAR0);
 623	iowrite32(0, port_base + MAR0 + 4);
 624
 625	/* init rx ring */
 626	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
 627	priv->rx_ring_tail = priv->rx_ring_dma_addr;
 628
 629	/* init tx ring */
 630	_sc92031_tx_clear(dev);
 631
 632	/* clear old register values */
 633	priv->intr_status = 0;
 634	atomic_set(&priv->intr_mask, 0);
 635	priv->rx_config = 0;
 636	priv->tx_config = 0;
 637	priv->mc_flags = 0;
 638
 639	/* configure rx buffer size */
 640	/* NOTE: vendor driver had dead code here to enable early tx/rx */
 641	iowrite32(Cfg1_Rcv64K, port_base + Config1);
 642
 643	_sc92031_phy_reset(dev);
 644	_sc92031_check_media(dev);
 645
 646	/* calculate rx fifo overflow */
 647	priv->rx_value = 0;
 648
 649	/* enable PM */
 650	iowrite32(priv->pm_config, port_base + PMConfig);
 651
 652	/* clear intr register */
 653	ioread32(port_base + IntrStatus);
 654}
 655
 656static void _sc92031_tx_tasklet(struct net_device *dev)
 657{
 658	struct sc92031_priv *priv = netdev_priv(dev);
 659	void __iomem *port_base = priv->port_base;
 660
 661	unsigned old_tx_tail;
 662	unsigned entry;
 663	u32 tx_status;
 664
 665	old_tx_tail = priv->tx_tail;
 666	while (priv->tx_head - priv->tx_tail > 0) {
 667		entry = priv->tx_tail % NUM_TX_DESC;
 668		tx_status = ioread32(port_base + TxStatus0 + entry * 4);
 669
 670		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
 671			break;
 672
 673		priv->tx_tail++;
 674
 675		if (tx_status & TxStatOK) {
 676			dev->stats.tx_bytes += tx_status & 0x1fff;
 677			dev->stats.tx_packets++;
  678			/* Note: TxCarrierLost is always asserted at 100Mbps. */
 679			dev->stats.collisions += (tx_status >> 22) & 0xf;
 680		}
 681
 682		if (tx_status & (TxOutOfWindow | TxAborted)) {
 683			dev->stats.tx_errors++;
 684
 685			if (tx_status & TxAborted)
 686				dev->stats.tx_aborted_errors++;
 687
 688			if (tx_status & TxCarrierLost)
 689				dev->stats.tx_carrier_errors++;
 690
 691			if (tx_status & TxOutOfWindow)
 692				dev->stats.tx_window_errors++;
 693		}
 694
 695		if (tx_status & TxUnderrun)
 696			dev->stats.tx_fifo_errors++;
 697	}
 698
 699	if (priv->tx_tail != old_tx_tail)
 700		if (netif_queue_stopped(dev))
 701			netif_wake_queue(dev);
 702}
 703
 704static void _sc92031_rx_tasklet_error(struct net_device *dev,
 705				      u32 rx_status, unsigned rx_size)
 706{
 707	if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
 708		dev->stats.rx_errors++;
 709		dev->stats.rx_length_errors++;
 710	}
 711
 712	if (!(rx_status & RxStatesOK)) {
 713		dev->stats.rx_errors++;
 714
 715		if (rx_status & (RxHugeFrame | RxSmallFrame))
 716			dev->stats.rx_length_errors++;
 717
 718		if (rx_status & RxBadAlign)
 719			dev->stats.rx_frame_errors++;
 720
 721		if (!(rx_status & RxCRCOK))
 722			dev->stats.rx_crc_errors++;
 723	} else {
 724		struct sc92031_priv *priv = netdev_priv(dev);
 725		priv->rx_loss++;
 726	}
 727}
 728
 729static void _sc92031_rx_tasklet(struct net_device *dev)
 730{
 731	struct sc92031_priv *priv = netdev_priv(dev);
 732	void __iomem *port_base = priv->port_base;
 733
 734	dma_addr_t rx_ring_head;
 735	unsigned rx_len;
 736	unsigned rx_ring_offset;
 737	void *rx_ring = priv->rx_ring;
 738
 739	rx_ring_head = ioread32(port_base + RxBufWPtr);
 740	rmb();
 741
 742	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
  743	 * We need to convert it to a 32-bit physical address.
 744	 */
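	/* Editorial illustration with made-up numbers: if rx_ring_dma_addr were
	 * 0x12340000 (aligned to RX_BUF_LEN) and the register reported offset
	 * 0x0123, the mask below keeps 0x0123, OR-ing in the high bits of the
	 * ring base gives 0x12340123, and the final adjustment adds RX_BUF_LEN
	 * only if the reconstructed head fell below the ring base.
	 */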
 745	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
 746	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);
 747	if (rx_ring_head < priv->rx_ring_dma_addr)
 748		rx_ring_head += RX_BUF_LEN;
 749
 750	if (rx_ring_head >= priv->rx_ring_tail)
 751		rx_len = rx_ring_head - priv->rx_ring_tail;
 752	else
 753		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);
 754
 755	if (!rx_len)
 756		return;
 757
 758	if (unlikely(rx_len > RX_BUF_LEN)) {
 759		if (printk_ratelimit())
 760			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
 761					dev->name);
 762		return;
 763	}
 764
 765	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;
 766
 767	while (rx_len) {
 768		u32 rx_status;
 769		unsigned rx_size, rx_size_align, pkt_size;
 770		struct sk_buff *skb;
 771
 772		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
 773		rmb();
 774
 775		rx_size = rx_status >> 20;
 776		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
 777		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.
 778
 779		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;
 780
 781		if (unlikely(rx_status == 0 ||
 782			     rx_size > (MAX_ETH_FRAME_SIZE + 4) ||
 783			     rx_size < 16 ||
 784			     !(rx_status & RxStatesOK))) {
 785			_sc92031_rx_tasklet_error(dev, rx_status, rx_size);
 786			break;
 787		}
 788
 789		if (unlikely(rx_size_align + 4 > rx_len)) {
 790			if (printk_ratelimit())
 791				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
 792			break;
 793		}
 794
 795		rx_len -= rx_size_align + 4;
 796
 797		skb = netdev_alloc_skb_ip_align(dev, pkt_size);
 798		if (unlikely(!skb)) {
 799			if (printk_ratelimit())
  800				printk(KERN_ERR "%s: Couldn't allocate an sk_buff for a packet of size %u\n",
 801						dev->name, pkt_size);
 802			goto next;
 803		}
 804
 805		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
 806			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
 807				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
 808			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
 809				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
 810		} else {
 811			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
 812		}
 813
 814		skb->protocol = eth_type_trans(skb, dev);
 815		netif_rx(skb);
 816
 817		dev->stats.rx_bytes += pkt_size;
 818		dev->stats.rx_packets++;
 819
 820		if (rx_status & Rx_Multicast)
 821			dev->stats.multicast++;
 822
 823	next:
 824		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
 825	}
 826	mb();
 827
 828	priv->rx_ring_tail = rx_ring_head;
 829	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
 830}
 831
 832static void _sc92031_link_tasklet(struct net_device *dev)
 833{
 834	if (_sc92031_check_media(dev))
 835		netif_wake_queue(dev);
 836	else {
 837		netif_stop_queue(dev);
 838		dev->stats.tx_carrier_errors++;
 839	}
 840}
 841
 842static void sc92031_tasklet(unsigned long data)
 843{
 844	struct net_device *dev = (struct net_device *)data;
 845	struct sc92031_priv *priv = netdev_priv(dev);
 846	void __iomem *port_base = priv->port_base;
 847	u32 intr_status, intr_mask;
 848
 849	intr_status = priv->intr_status;
 850
 851	spin_lock(&priv->lock);
 852
 853	if (unlikely(!netif_running(dev)))
 854		goto out;
 855
 856	if (intr_status & TxOK)
 857		_sc92031_tx_tasklet(dev);
 858
 859	if (intr_status & RxOK)
 860		_sc92031_rx_tasklet(dev);
 861
 862	if (intr_status & RxOverflow)
 863		dev->stats.rx_errors++;
 864
 865	if (intr_status & TimeOut) {
 866		dev->stats.rx_errors++;
 867		dev->stats.rx_length_errors++;
 868	}
 869
 870	if (intr_status & (LinkFail | LinkOK))
 871		_sc92031_link_tasklet(dev);
 872
 873out:
 874	intr_mask = atomic_read(&priv->intr_mask);
 875	rmb();
 876
 877	iowrite32(intr_mask, port_base + IntrMask);
 878	mmiowb();
 879
 880	spin_unlock(&priv->lock);
 881}
 882
 883static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
 884{
 885	struct net_device *dev = dev_id;
 886	struct sc92031_priv *priv = netdev_priv(dev);
 887	void __iomem *port_base = priv->port_base;
 888	u32 intr_status, intr_mask;
 889
 890	/* mask interrupts before clearing IntrStatus */
 891	iowrite32(0, port_base + IntrMask);
 892	_sc92031_dummy_read(port_base);
 893
 894	intr_status = ioread32(port_base + IntrStatus);
 895	if (unlikely(intr_status == 0xffffffff))
 896		return IRQ_NONE;	// hardware has gone missing
 897
 898	intr_status &= IntrBits;
 899	if (!intr_status)
 900		goto out_none;
 901
 902	priv->intr_status = intr_status;
 903	tasklet_schedule(&priv->tasklet);
 904
 905	return IRQ_HANDLED;
 906
 907out_none:
 908	intr_mask = atomic_read(&priv->intr_mask);
 909	rmb();
 910
 911	iowrite32(intr_mask, port_base + IntrMask);
 912	mmiowb();
 913
 914	return IRQ_NONE;
 915}
 916
 917static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
 918{
 919	struct sc92031_priv *priv = netdev_priv(dev);
 920	void __iomem *port_base = priv->port_base;
 921
  922	// FIXME I do not understand what this is trying to do.
 923	if (netif_running(dev)) {
 924		int temp;
 925
 926		spin_lock_bh(&priv->lock);
 927
 928		/* Update the error count. */
 929		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;
 930
 931		if (temp == 0xffff) {
 932			priv->rx_value += temp;
 933			dev->stats.rx_fifo_errors = priv->rx_value;
 934		} else
 935			dev->stats.rx_fifo_errors = temp + priv->rx_value;
 936
 937		spin_unlock_bh(&priv->lock);
 938	}
 939
 940	return &dev->stats;
 941}
 942
 943static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
 944				      struct net_device *dev)
 945{
 946	struct sc92031_priv *priv = netdev_priv(dev);
 947	void __iomem *port_base = priv->port_base;
 948	unsigned len;
 949	unsigned entry;
 950	u32 tx_status;
 951
 952	if (unlikely(skb->len > TX_BUF_SIZE)) {
 953		dev->stats.tx_dropped++;
 954		goto out;
 955	}
 956
 957	spin_lock(&priv->lock);
 958
 959	if (unlikely(!netif_carrier_ok(dev))) {
 960		dev->stats.tx_dropped++;
 961		goto out_unlock;
 962	}
 963
 964	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);
 965
 966	entry = priv->tx_head++ % NUM_TX_DESC;
 967
 968	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
 969
 970	len = skb->len;
 971	if (len < ETH_ZLEN) {
 972		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
 973				0, ETH_ZLEN - len);
 974		len = ETH_ZLEN;
 975	}
 976
 977	wmb();
 978
 979	if (len < 100)
 980		tx_status = len;
 981	else if (len < 300)
 982		tx_status = 0x30000 | len;
 983	else
 984		tx_status = 0x50000 | len;
 985
 986	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
 987			port_base + TxAddr0 + entry * 4);
 988	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
 989	mmiowb();
 990
 991	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
 992		netif_stop_queue(dev);
 993
 994out_unlock:
 995	spin_unlock(&priv->lock);
 996
 997out:
 998	dev_kfree_skb(skb);
 999
1000	return NETDEV_TX_OK;
1001}
1002
1003static int sc92031_open(struct net_device *dev)
1004{
1005	int err;
1006	struct sc92031_priv *priv = netdev_priv(dev);
1007	struct pci_dev *pdev = priv->pdev;
1008
1009	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
1010			&priv->rx_ring_dma_addr);
1011	if (unlikely(!priv->rx_ring)) {
1012		err = -ENOMEM;
1013		goto out_alloc_rx_ring;
1014	}
1015
1016	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
1017			&priv->tx_bufs_dma_addr);
1018	if (unlikely(!priv->tx_bufs)) {
1019		err = -ENOMEM;
1020		goto out_alloc_tx_bufs;
1021	}
1022	priv->tx_head = priv->tx_tail = 0;
1023
1024	err = request_irq(pdev->irq, sc92031_interrupt,
1025			IRQF_SHARED, dev->name, dev);
1026	if (unlikely(err < 0))
1027		goto out_request_irq;
1028
1029	priv->pm_config = 0;
1030
1031	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
1032	spin_lock_bh(&priv->lock);
1033
1034	_sc92031_reset(dev);
1035	mmiowb();
1036
1037	spin_unlock_bh(&priv->lock);
1038	sc92031_enable_interrupts(dev);
1039
1040	if (netif_carrier_ok(dev))
1041		netif_start_queue(dev);
1042	else
1043		netif_tx_disable(dev);
1044
1045	return 0;
1046
1047out_request_irq:
1048	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1049			priv->tx_bufs_dma_addr);
1050out_alloc_tx_bufs:
1051	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1052			priv->rx_ring_dma_addr);
1053out_alloc_rx_ring:
1054	return err;
1055}
1056
1057static int sc92031_stop(struct net_device *dev)
1058{
1059	struct sc92031_priv *priv = netdev_priv(dev);
1060	struct pci_dev *pdev = priv->pdev;
1061
1062	netif_tx_disable(dev);
1063
1064	/* Disable interrupts, stop Tx and Rx. */
1065	sc92031_disable_interrupts(dev);
1066
1067	spin_lock_bh(&priv->lock);
1068
1069	_sc92031_disable_tx_rx(dev);
1070	_sc92031_tx_clear(dev);
1071	mmiowb();
1072
1073	spin_unlock_bh(&priv->lock);
1074
1075	free_irq(pdev->irq, dev);
1076	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
1077			priv->tx_bufs_dma_addr);
1078	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
1079			priv->rx_ring_dma_addr);
1080
1081	return 0;
1082}
1083
1084static void sc92031_set_multicast_list(struct net_device *dev)
1085{
1086	struct sc92031_priv *priv = netdev_priv(dev);
1087
1088	spin_lock_bh(&priv->lock);
1089
1090	_sc92031_set_mar(dev);
1091	_sc92031_set_rx_config(dev);
1092	mmiowb();
1093
1094	spin_unlock_bh(&priv->lock);
1095}
1096
1097static void sc92031_tx_timeout(struct net_device *dev)
1098{
1099	struct sc92031_priv *priv = netdev_priv(dev);
1100
1101	/* Disable interrupts by clearing the interrupt mask.*/
1102	sc92031_disable_interrupts(dev);
1103
1104	spin_lock(&priv->lock);
1105
1106	priv->tx_timeouts++;
1107
1108	_sc92031_reset(dev);
1109	mmiowb();
1110
1111	spin_unlock(&priv->lock);
1112
1113	/* enable interrupts */
1114	sc92031_enable_interrupts(dev);
1115
1116	if (netif_carrier_ok(dev))
1117		netif_wake_queue(dev);
1118}
1119
1120#ifdef CONFIG_NET_POLL_CONTROLLER
1121static void sc92031_poll_controller(struct net_device *dev)
1122{
1123	disable_irq(dev->irq);
1124	if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE)
1125		sc92031_tasklet((unsigned long)dev);
1126	enable_irq(dev->irq);
1127}
1128#endif
1129
1130static int sc92031_ethtool_get_settings(struct net_device *dev,
1131		struct ethtool_cmd *cmd)
1132{
1133	struct sc92031_priv *priv = netdev_priv(dev);
1134	void __iomem *port_base = priv->port_base;
1135	u8 phy_address;
1136	u32 phy_ctrl;
1137	u16 output_status;
1138
1139	spin_lock_bh(&priv->lock);
1140
1141	phy_address = ioread32(port_base + Miicmd1) >> 27;
1142	phy_ctrl = ioread32(port_base + PhyCtrl);
1143
1144	output_status = _sc92031_mii_read(port_base, MII_OutputStatus);
1145	_sc92031_mii_scan(port_base);
1146	mmiowb();
1147
1148	spin_unlock_bh(&priv->lock);
1149
1150	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full
1151			| SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full
1152			| SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII;
1153
1154	cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
1155
1156	if ((phy_ctrl & (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1157			== (PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10))
1158		cmd->advertising |= ADVERTISED_Autoneg;
1159
1160	if ((phy_ctrl & PhyCtrlSpd10) == PhyCtrlSpd10)
1161		cmd->advertising |= ADVERTISED_10baseT_Half;
1162
1163	if ((phy_ctrl & (PhyCtrlSpd10 | PhyCtrlDux))
1164			== (PhyCtrlSpd10 | PhyCtrlDux))
1165		cmd->advertising |= ADVERTISED_10baseT_Full;
1166
1167	if ((phy_ctrl & PhyCtrlSpd100) == PhyCtrlSpd100)
1168		cmd->advertising |= ADVERTISED_100baseT_Half;
1169
1170	if ((phy_ctrl & (PhyCtrlSpd100 | PhyCtrlDux))
1171			== (PhyCtrlSpd100 | PhyCtrlDux))
1172		cmd->advertising |= ADVERTISED_100baseT_Full;
1173
1174	if (phy_ctrl & PhyCtrlAne)
1175		cmd->advertising |= ADVERTISED_Autoneg;
1176
1177	ethtool_cmd_speed_set(cmd,
1178			      (output_status & 0x2) ? SPEED_100 : SPEED_10);
1179	cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
1180	cmd->port = PORT_MII;
1181	cmd->phy_address = phy_address;
1182	cmd->transceiver = XCVR_INTERNAL;
1183	cmd->autoneg = (phy_ctrl & PhyCtrlAne) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1184
1185	return 0;
1186}
1187
1188static int sc92031_ethtool_set_settings(struct net_device *dev,
1189		struct ethtool_cmd *cmd)
1190{
1191	struct sc92031_priv *priv = netdev_priv(dev);
1192	void __iomem *port_base = priv->port_base;
1193	u32 speed = ethtool_cmd_speed(cmd);
1194	u32 phy_ctrl;
1195	u32 old_phy_ctrl;
1196
1197	if (!(speed == SPEED_10 || speed == SPEED_100))
1198		return -EINVAL;
1199	if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
1200		return -EINVAL;
1201	if (!(cmd->port == PORT_MII))
1202		return -EINVAL;
1203	if (!(cmd->phy_address == 0x1f))
1204		return -EINVAL;
1205	if (!(cmd->transceiver == XCVR_INTERNAL))
1206		return -EINVAL;
1207	if (!(cmd->autoneg == AUTONEG_DISABLE || cmd->autoneg == AUTONEG_ENABLE))
1208		return -EINVAL;
1209
1210	if (cmd->autoneg == AUTONEG_ENABLE) {
1211		if (!(cmd->advertising & (ADVERTISED_Autoneg
1212				| ADVERTISED_100baseT_Full
1213				| ADVERTISED_100baseT_Half
1214				| ADVERTISED_10baseT_Full
1215				| ADVERTISED_10baseT_Half)))
1216			return -EINVAL;
1217
1218		phy_ctrl = PhyCtrlAne;
1219
1220		// FIXME: I'm not sure what the original code was trying to do
1221		if (cmd->advertising & ADVERTISED_Autoneg)
1222			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
1223		if (cmd->advertising & ADVERTISED_100baseT_Full)
1224			phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
1225		if (cmd->advertising & ADVERTISED_100baseT_Half)
1226			phy_ctrl |= PhyCtrlSpd100;
1227		if (cmd->advertising & ADVERTISED_10baseT_Full)
1228			phy_ctrl |= PhyCtrlSpd10 | PhyCtrlDux;
1229		if (cmd->advertising & ADVERTISED_10baseT_Half)
1230			phy_ctrl |= PhyCtrlSpd10;
1231	} else {
1232		// FIXME: Whole branch guessed
1233		phy_ctrl = 0;
1234
1235		if (speed == SPEED_10)
1236			phy_ctrl |= PhyCtrlSpd10;
1237		else /* cmd->speed == SPEED_100 */
1238			phy_ctrl |= PhyCtrlSpd100;
1239
1240		if (cmd->duplex == DUPLEX_FULL)
1241			phy_ctrl |= PhyCtrlDux;
1242	}
1243
1244	spin_lock_bh(&priv->lock);
1245
1246	old_phy_ctrl = ioread32(port_base + PhyCtrl);
1247	phy_ctrl |= old_phy_ctrl & ~(PhyCtrlAne | PhyCtrlDux
1248			| PhyCtrlSpd100 | PhyCtrlSpd10);
1249	if (phy_ctrl != old_phy_ctrl)
1250		iowrite32(phy_ctrl, port_base + PhyCtrl);
1251
1252	spin_unlock_bh(&priv->lock);
1253
1254	return 0;
1255}
1256
1257static void sc92031_ethtool_get_wol(struct net_device *dev,
1258		struct ethtool_wolinfo *wolinfo)
1259{
1260	struct sc92031_priv *priv = netdev_priv(dev);
1261	void __iomem *port_base = priv->port_base;
1262	u32 pm_config;
1263
1264	spin_lock_bh(&priv->lock);
1265	pm_config = ioread32(port_base + PMConfig);
1266	spin_unlock_bh(&priv->lock);
1267
1268	// FIXME: Guessed
1269	wolinfo->supported = WAKE_PHY | WAKE_MAGIC
1270			| WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1271	wolinfo->wolopts = 0;
1272
1273	if (pm_config & PM_LinkUp)
1274		wolinfo->wolopts |= WAKE_PHY;
1275
1276	if (pm_config & PM_Magic)
1277		wolinfo->wolopts |= WAKE_MAGIC;
1278
1279	if (pm_config & PM_WakeUp)
1280		// FIXME: Guessed
1281		wolinfo->wolopts |= WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
1282}
1283
1284static int sc92031_ethtool_set_wol(struct net_device *dev,
1285		struct ethtool_wolinfo *wolinfo)
1286{
1287	struct sc92031_priv *priv = netdev_priv(dev);
1288	void __iomem *port_base = priv->port_base;
1289	u32 pm_config;
1290
1291	spin_lock_bh(&priv->lock);
1292
1293	pm_config = ioread32(port_base + PMConfig)
1294			& ~(PM_LinkUp | PM_Magic | PM_WakeUp);
1295
1296	if (wolinfo->wolopts & WAKE_PHY)
1297		pm_config |= PM_LinkUp;
1298
1299	if (wolinfo->wolopts & WAKE_MAGIC)
1300		pm_config |= PM_Magic;
1301
1302	// FIXME: Guessed
1303	if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST))
1304		pm_config |= PM_WakeUp;
1305
1306	priv->pm_config = pm_config;
1307	iowrite32(pm_config, port_base + PMConfig);
1308	mmiowb();
1309
1310	spin_unlock_bh(&priv->lock);
1311
1312	return 0;
1313}
1314
1315static int sc92031_ethtool_nway_reset(struct net_device *dev)
1316{
1317	int err = 0;
1318	struct sc92031_priv *priv = netdev_priv(dev);
1319	void __iomem *port_base = priv->port_base;
1320	u16 bmcr;
1321
1322	spin_lock_bh(&priv->lock);
1323
1324	bmcr = _sc92031_mii_read(port_base, MII_BMCR);
1325	if (!(bmcr & BMCR_ANENABLE)) {
1326		err = -EINVAL;
1327		goto out;
1328	}
1329
1330	_sc92031_mii_write(port_base, MII_BMCR, bmcr | BMCR_ANRESTART);
1331
1332out:
1333	_sc92031_mii_scan(port_base);
1334	mmiowb();
1335
1336	spin_unlock_bh(&priv->lock);
1337
1338	return err;
1339}
1340
1341static const char sc92031_ethtool_stats_strings[SILAN_STATS_NUM][ETH_GSTRING_LEN] = {
1342	"tx_timeout",
1343	"rx_loss",
1344};
1345
1346static void sc92031_ethtool_get_strings(struct net_device *dev,
1347		u32 stringset, u8 *data)
1348{
1349	if (stringset == ETH_SS_STATS)
1350		memcpy(data, sc92031_ethtool_stats_strings,
1351				SILAN_STATS_NUM * ETH_GSTRING_LEN);
1352}
1353
1354static int sc92031_ethtool_get_sset_count(struct net_device *dev, int sset)
1355{
1356	switch (sset) {
1357	case ETH_SS_STATS:
1358		return SILAN_STATS_NUM;
1359	default:
1360		return -EOPNOTSUPP;
1361	}
1362}
1363
1364static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1365		struct ethtool_stats *stats, u64 *data)
1366{
1367	struct sc92031_priv *priv = netdev_priv(dev);
1368
1369	spin_lock_bh(&priv->lock);
1370	data[0] = priv->tx_timeouts;
1371	data[1] = priv->rx_loss;
1372	spin_unlock_bh(&priv->lock);
1373}
1374
1375static const struct ethtool_ops sc92031_ethtool_ops = {
1376	.get_settings		= sc92031_ethtool_get_settings,
1377	.set_settings		= sc92031_ethtool_set_settings,
1378	.get_wol		= sc92031_ethtool_get_wol,
1379	.set_wol		= sc92031_ethtool_set_wol,
1380	.nway_reset		= sc92031_ethtool_nway_reset,
1381	.get_link		= ethtool_op_get_link,
1382	.get_strings		= sc92031_ethtool_get_strings,
1383	.get_sset_count		= sc92031_ethtool_get_sset_count,
1384	.get_ethtool_stats	= sc92031_ethtool_get_ethtool_stats,
1385};
1386
1387
1388static const struct net_device_ops sc92031_netdev_ops = {
1389	.ndo_get_stats		= sc92031_get_stats,
1390	.ndo_start_xmit		= sc92031_start_xmit,
1391	.ndo_open		= sc92031_open,
1392	.ndo_stop		= sc92031_stop,
1393	.ndo_set_multicast_list	= sc92031_set_multicast_list,
1394	.ndo_change_mtu		= eth_change_mtu,
1395	.ndo_validate_addr	= eth_validate_addr,
1396	.ndo_set_mac_address 	= eth_mac_addr,
1397	.ndo_tx_timeout		= sc92031_tx_timeout,
1398#ifdef CONFIG_NET_POLL_CONTROLLER
1399	.ndo_poll_controller	= sc92031_poll_controller,
1400#endif
1401};
1402
1403static int __devinit sc92031_probe(struct pci_dev *pdev,
1404		const struct pci_device_id *id)
1405{
1406	int err;
1407	void __iomem* port_base;
1408	struct net_device *dev;
1409	struct sc92031_priv *priv;
1410	u32 mac0, mac1;
1411	unsigned long base_addr;
1412
1413	err = pci_enable_device(pdev);
1414	if (unlikely(err < 0))
1415		goto out_enable_device;
1416
1417	pci_set_master(pdev);
1418
1419	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1420	if (unlikely(err < 0))
1421		goto out_set_dma_mask;
1422
1423	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1424	if (unlikely(err < 0))
1425		goto out_set_dma_mask;
1426
1427	err = pci_request_regions(pdev, SC92031_NAME);
1428	if (unlikely(err < 0))
1429		goto out_request_regions;
1430
1431	port_base = pci_iomap(pdev, SC92031_USE_BAR, 0);
1432	if (unlikely(!port_base)) {
1433		err = -EIO;
1434		goto out_iomap;
1435	}
1436
1437	dev = alloc_etherdev(sizeof(struct sc92031_priv));
1438	if (unlikely(!dev)) {
1439		err = -ENOMEM;
1440		goto out_alloc_etherdev;
1441	}
1442
1443	pci_set_drvdata(pdev, dev);
1444	SET_NETDEV_DEV(dev, &pdev->dev);
1445
1446#if SC92031_USE_BAR == 0
1447	dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1448	dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1449#elif SC92031_USE_BAR == 1
1450	dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1451#endif
1452	dev->irq = pdev->irq;
1453
1454	/* faked with skb_copy_and_csum_dev */
1455	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1456		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1457
1458	dev->netdev_ops		= &sc92031_netdev_ops;
1459	dev->watchdog_timeo	= TX_TIMEOUT;
1460	dev->ethtool_ops	= &sc92031_ethtool_ops;
1461
1462	priv = netdev_priv(dev);
1463	spin_lock_init(&priv->lock);
1464	priv->port_base = port_base;
1465	priv->pdev = pdev;
1466	tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
1467	/* Fudge tasklet count so the call to sc92031_enable_interrupts at
1468	 * sc92031_open will work correctly */
1469	tasklet_disable_nosync(&priv->tasklet);
1470
1471	/* PCI PM Wakeup */
1472	iowrite32((~PM_LongWF & ~PM_LWPTN) | PM_Enable, port_base + PMConfig);
1473
1474	mac0 = ioread32(port_base + MAC0);
1475	mac1 = ioread32(port_base + MAC0 + 4);
1476	dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
1477	dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
1478	dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
1479	dev->dev_addr[3] = dev->perm_addr[3] = mac0;
1480	dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
1481	dev->dev_addr[5] = dev->perm_addr[5] = mac1;
1482
1483	err = register_netdev(dev);
1484	if (err < 0)
1485		goto out_register_netdev;
1486
1487#if SC92031_USE_BAR == 0
1488	base_addr = dev->mem_start;
1489#elif SC92031_USE_BAR == 1
1490	base_addr = dev->base_addr;
1491#endif
1492	printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1493			base_addr, dev->dev_addr, dev->irq);
1494
1495	return 0;
1496
1497out_register_netdev:
1498	free_netdev(dev);
1499out_alloc_etherdev:
1500	pci_iounmap(pdev, port_base);
1501out_iomap:
1502	pci_release_regions(pdev);
1503out_request_regions:
1504out_set_dma_mask:
1505	pci_disable_device(pdev);
1506out_enable_device:
1507	return err;
1508}
1509
1510static void __devexit sc92031_remove(struct pci_dev *pdev)
1511{
1512	struct net_device *dev = pci_get_drvdata(pdev);
1513	struct sc92031_priv *priv = netdev_priv(dev);
1514	void __iomem* port_base = priv->port_base;
1515
1516	unregister_netdev(dev);
1517	free_netdev(dev);
1518	pci_iounmap(pdev, port_base);
1519	pci_release_regions(pdev);
1520	pci_disable_device(pdev);
1521}
1522
1523static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state)
1524{
1525	struct net_device *dev = pci_get_drvdata(pdev);
1526	struct sc92031_priv *priv = netdev_priv(dev);
1527
1528	pci_save_state(pdev);
1529
1530	if (!netif_running(dev))
1531		goto out;
1532
1533	netif_device_detach(dev);
1534
1535	/* Disable interrupts, stop Tx and Rx. */
1536	sc92031_disable_interrupts(dev);
1537
1538	spin_lock_bh(&priv->lock);
1539
1540	_sc92031_disable_tx_rx(dev);
1541	_sc92031_tx_clear(dev);
1542	mmiowb();
1543
1544	spin_unlock_bh(&priv->lock);
1545
1546out:
1547	pci_set_power_state(pdev, pci_choose_state(pdev, state));
1548
1549	return 0;
1550}
1551
1552static int sc92031_resume(struct pci_dev *pdev)
1553{
1554	struct net_device *dev = pci_get_drvdata(pdev);
1555	struct sc92031_priv *priv = netdev_priv(dev);
1556
1557	pci_restore_state(pdev);
1558	pci_set_power_state(pdev, PCI_D0);
1559
1560	if (!netif_running(dev))
1561		goto out;
1562
1563	/* Interrupts already disabled by sc92031_suspend */
1564	spin_lock_bh(&priv->lock);
1565
1566	_sc92031_reset(dev);
1567	mmiowb();
1568
1569	spin_unlock_bh(&priv->lock);
1570	sc92031_enable_interrupts(dev);
1571
1572	netif_device_attach(dev);
1573
1574	if (netif_carrier_ok(dev))
1575		netif_wake_queue(dev);
1576	else
1577		netif_tx_disable(dev);
1578
1579out:
1580	return 0;
1581}
1582
1583static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
1584	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
1585	{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
1586	{ PCI_DEVICE(0x1088, 0x2031) },
1587	{ 0, }
1588};
1589MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table);
1590
1591static struct pci_driver sc92031_pci_driver = {
1592	.name		= SC92031_NAME,
1593	.id_table	= sc92031_pci_device_id_table,
1594	.probe		= sc92031_probe,
1595	.remove		= __devexit_p(sc92031_remove),
1596	.suspend	= sc92031_suspend,
1597	.resume		= sc92031_resume,
1598};
1599
1600static int __init sc92031_init(void)
1601{
1602	return pci_register_driver(&sc92031_pci_driver);
1603}
1604
1605static void __exit sc92031_exit(void)
1606{
1607	pci_unregister_driver(&sc92031_pci_driver);
1608}
1609
1610module_init(sc92031_init);
1611module_exit(sc92031_exit);
1612
1613MODULE_LICENSE("GPL");
1614MODULE_AUTHOR("Cesar Eduardo Barros <cesarb@cesarb.net>");
1615MODULE_DESCRIPTION("Silan SC92031 PCI Fast Ethernet Adapter driver");