   1
   2/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
   3 * Copyright (C) 2004 Advanced Micro Devices
   4 *
   5 *
   6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
   7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
   8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
   9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
  10 * Copyright 1993 United States Government as represented by the
  11 *	Director, National Security Agency.[ pcnet32.c ]
  12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
  13 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
  14 *
  15 *
  16 * This program is free software; you can redistribute it and/or modify
  17 * it under the terms of the GNU General Public License as published by
  18 * the Free Software Foundation; either version 2 of the License, or
  19 * (at your option) any later version.
  20 *
  21 * This program is distributed in the hope that it will be useful,
  22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  24 * GNU General Public License for more details.
  25 *
  26 * You should have received a copy of the GNU General Public License
  27 * along with this program; if not, write to the Free Software
  28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
  29 * USA
  30
  31Module Name:
  32
  33	amd8111e.c
  34
  35Abstract:
  36
  37 	 AMD8111 based 10/100 Ethernet Controller Driver.
  38
  39Environment:
  40
  41	Kernel Mode
  42
  43Revision History:
  44 	3.0.0
  45	   Initial Revision.
  46	3.0.1
  47	 1. Dynamic interrupt coalescing.
  48	 2. Removed prev_stats.
  49	 3. MII support.
  50	 4. Dynamic IPG support
  51	3.0.2  05/29/2003
  52	 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
  53	 2. Bug fix: Fixed VLAN support failure.
  54	 3. Bug fix: Fixed receive interrupt coalescing bug.
  55	 4. Dynamic IPG support is disabled by default.
  56	3.0.3 06/05/2003
  57	 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
  58	3.0.4 12/09/2003
  59	 1. Added set_mac_address routine for bonding driver support.
  60	 2. Tested the driver for bonding support
  61	 3. Bug fix: Fixed mismatch in actual receive buffer length and length
  62	    indicated to the h/w.
  63	 4. Modified amd8111e_rx() routine to receive all the received packets
  64	    in the first interrupt.
  65	 5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
  66	3.0.5 03/22/2004
  67	 1. Added NAPI support
  68
  69*/
  70
  71
  72#include <linux/module.h>
  73#include <linux/kernel.h>
  74#include <linux/types.h>
  75#include <linux/compiler.h>
  76#include <linux/delay.h>
  77#include <linux/init.h>
  78#include <linux/interrupt.h>
  79#include <linux/ioport.h>
  80#include <linux/pci.h>
  81#include <linux/netdevice.h>
  82#include <linux/etherdevice.h>
  83#include <linux/skbuff.h>
  84#include <linux/ethtool.h>
  85#include <linux/mii.h>
  86#include <linux/if_vlan.h>
  87#include <linux/ctype.h>
  88#include <linux/crc32.h>
  89#include <linux/dma-mapping.h>
  90
  91#include <asm/system.h>
  92#include <asm/io.h>
  93#include <asm/byteorder.h>
  94#include <asm/uaccess.h>
  95
  96#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  97#define AMD8111E_VLAN_TAG_USED 1
  98#else
  99#define AMD8111E_VLAN_TAG_USED 0
 100#endif
 101
 102#include "amd8111e.h"
 103#define MODULE_NAME	"amd8111e"
 104#define MODULE_VERS	"3.0.7"
 105MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 106MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
 107MODULE_LICENSE("GPL");
 108MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 109module_param_array(speed_duplex, int, NULL, 0);
 110MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
 111module_param_array(coalesce, bool, NULL, 0);
 112MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
 113module_param_array(dynamic_ipg, bool, NULL, 0);
 114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
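/* Example (hypothetical invocation): these per-adapter parameters are
 * arrays indexed by card, so the first adapter could, for instance, be
 * loaded with auto-negotiation, interrupt coalescing enabled and dynamic
 * IPG disabled via:
 *   modprobe amd8111e speed_duplex=0 coalesce=1 dynamic_ipg=0
 */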
 115
 116static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
 117
 118	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
 119	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
 120	{ 0, }
 121
 122};
 123/*
 124This function will read the PHY registers.
 125*/
 126static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
 127{
 128	void __iomem *mmio = lp->mmio;
 129	unsigned int reg_val;
 130	unsigned int repeat= REPEAT_CNT;
 131
 132	reg_val = readl(mmio + PHY_ACCESS);
 133	while (reg_val & PHY_CMD_ACTIVE)
 134		reg_val = readl( mmio + PHY_ACCESS );
 135
 136	writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
 137			   ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
 138	do{
 139		reg_val = readl(mmio + PHY_ACCESS);
 140		udelay(30);  /* It takes 30 us to read/write data */
 141	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
 142	if(reg_val & PHY_RD_ERR)
 143		goto err_phy_read;
 144
 145	*val = reg_val & 0xffff;
 146	return 0;
 147err_phy_read:
 148	*val = 0;
 149	return -EINVAL;
 150
 151}
 152
 153/*
 154This function will write into PHY registers.
 155*/
 156static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
 157{
 158	unsigned int repeat = REPEAT_CNT;
 159	void __iomem *mmio = lp->mmio;
 160	unsigned int reg_val;
 161
 162	reg_val = readl(mmio + PHY_ACCESS);
 163	while (reg_val & PHY_CMD_ACTIVE)
 164		reg_val = readl( mmio + PHY_ACCESS );
 165
 166	writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
 167			   ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
 168
 169	do{
 170		reg_val = readl(mmio + PHY_ACCESS);
 171		udelay(30);  /* It takes 30 us to read/write the data */
 172	} while (--repeat && (reg_val & PHY_CMD_ACTIVE));
 173
 174	if(reg_val & PHY_RD_ERR)
 175		goto err_phy_write;
 176
 177	return 0;
 178
 179err_phy_write:
 180	return -EINVAL;
 181
 182}
 183/*
 184This is the mii register read function provided to the mii interface.
 185*/
 186static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
 187{
 188	struct amd8111e_priv* lp = netdev_priv(dev);
 189	unsigned int reg_val;
 190
 191	amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
 192	return reg_val;
 193
 194}
 195
 196/*
 197This is the mii register write function provided to the mii interface.
 198*/
 199static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
 200{
 201	struct amd8111e_priv* lp = netdev_priv(dev);
 202
 203	amd8111e_write_phy(lp, phy_id, reg_num, val);
 204}
 205
 206/*
  207This function sets the PHY speed. During initialization it sets the speed to 100 Mbps full duplex.
 208*/
 209static void amd8111e_set_ext_phy(struct net_device *dev)
 210{
 211	struct amd8111e_priv *lp = netdev_priv(dev);
 212	u32 bmcr,advert,tmp;
 213
 214	/* Determine mii register values to set the speed */
 215	advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
 216	tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
 217	switch (lp->ext_phy_option){
 218
 219		default:
 220		case SPEED_AUTONEG: /* advertise all values */
 221			tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
 222				ADVERTISE_100HALF|ADVERTISE_100FULL) ;
 223			break;
 224		case SPEED10_HALF:
 225			tmp |= ADVERTISE_10HALF;
 226			break;
 227		case SPEED10_FULL:
 228			tmp |= ADVERTISE_10FULL;
 229			break;
 230		case SPEED100_HALF:
 231			tmp |= ADVERTISE_100HALF;
 232			break;
 233		case SPEED100_FULL:
 234			tmp |= ADVERTISE_100FULL;
 235			break;
 236	}
 237
 238	if(advert != tmp)
 239		amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
 240	/* Restart auto negotiation */
 241	bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
 242	bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 243	amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
 244
 245}
 246
 247/*
 248This function will unmap skb->data space and will free
 249all transmit and receive skbuffs.
 250*/
 251static int amd8111e_free_skbs(struct net_device *dev)
 252{
 253	struct amd8111e_priv *lp = netdev_priv(dev);
 254	struct sk_buff* rx_skbuff;
 255	int i;
 256
 257	/* Freeing transmit skbs */
 258	for(i = 0; i < NUM_TX_BUFFERS; i++){
 259		if(lp->tx_skbuff[i]){
  260			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
 261			dev_kfree_skb (lp->tx_skbuff[i]);
 262			lp->tx_skbuff[i] = NULL;
 263			lp->tx_dma_addr[i] = 0;
 264		}
 265	}
 266	/* Freeing previously allocated receive buffers */
 267	for (i = 0; i < NUM_RX_BUFFERS; i++){
 268		rx_skbuff = lp->rx_skbuff[i];
 269		if(rx_skbuff != NULL){
 270			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
 271				  lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
 272			dev_kfree_skb(lp->rx_skbuff[i]);
 273			lp->rx_skbuff[i] = NULL;
 274			lp->rx_dma_addr[i] = 0;
 275		}
 276	}
 277
 278	return 0;
 279}
 280
 281/*
  282This will set the receive buffer length corresponding to the mtu size of the network interface.
 283*/
 284static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
 285{
 286	struct amd8111e_priv* lp = netdev_priv(dev);
 287	unsigned int mtu = dev->mtu;
 288
 289	if (mtu > ETH_DATA_LEN){
 290		/* MTU + ethernet header + FCS
 291		+ optional VLAN tag + skb reserve space 2 */
 292
 293		lp->rx_buff_len = mtu + ETH_HLEN + 10;
 294		lp->options |= OPTION_JUMBO_ENABLE;
 295	} else{
 296		lp->rx_buff_len = PKT_BUFF_SZ;
 297		lp->options &= ~OPTION_JUMBO_ENABLE;
 298	}
 299}
 300
 301/*
  302This function frees all the previously allocated buffers, determines the new receive buffer length, and allocates new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
 303 */
 304static int amd8111e_init_ring(struct net_device *dev)
 305{
 306	struct amd8111e_priv *lp = netdev_priv(dev);
 307	int i;
 308
 309	lp->rx_idx = lp->tx_idx = 0;
 310	lp->tx_complete_idx = 0;
 311	lp->tx_ring_idx = 0;
 312
 313
 314	if(lp->opened)
 315		/* Free previously allocated transmit and receive skbs */
 316		amd8111e_free_skbs(dev);
 317
 318	else{
 319		 /* allocate the tx and rx descriptors */
 320	     	if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
 321			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
 322			&lp->tx_ring_dma_addr)) == NULL)
 323
 324			goto err_no_mem;
 325
 326	     	if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
 327			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
 328			&lp->rx_ring_dma_addr)) == NULL)
 329
 330			goto err_free_tx_ring;
 331
 332	}
 333	/* Set new receive buff size */
 334	amd8111e_set_rx_buff_len(dev);
 335
 336	/* Allocating receive  skbs */
 337	for (i = 0; i < NUM_RX_BUFFERS; i++) {
 338
 339		if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
  340				/* Release previously allocated skbs */
 341				for(--i; i >= 0 ;i--)
 342					dev_kfree_skb(lp->rx_skbuff[i]);
 343				goto err_free_rx_ring;
 344		}
 345		skb_reserve(lp->rx_skbuff[i],2);
 346	}
  347        /* Initializing receive descriptors */
 348	for (i = 0; i < NUM_RX_BUFFERS; i++) {
 349		lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
 350			lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
 351
 352		lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
 353		lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
 354		wmb();
 355		lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
 356	}
 357
 358	/* Initializing transmit descriptors */
 359	for (i = 0; i < NUM_TX_RING_DR; i++) {
 360		lp->tx_ring[i].buff_phy_addr = 0;
 361		lp->tx_ring[i].tx_flags = 0;
 362		lp->tx_ring[i].buff_count = 0;
 363	}
 364
 365	return 0;
 366
 367err_free_rx_ring:
 368
 369	pci_free_consistent(lp->pci_dev,
 370		sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
 371		lp->rx_ring_dma_addr);
 372
 373err_free_tx_ring:
 374
 375	pci_free_consistent(lp->pci_dev,
 376		 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
 377		 lp->tx_ring_dma_addr);
 378
 379err_no_mem:
 380	return -ENOMEM;
 381}
 382/* This function will set the interrupt coalescing according to the input arguments */
 383static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
 384{
 385	unsigned int timeout;
 386	unsigned int event_count;
 387
 388	struct amd8111e_priv *lp = netdev_priv(dev);
 389	void __iomem *mmio = lp->mmio;
 390	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
 391
 392
 393	switch(cmod)
 394	{
 395		case RX_INTR_COAL :
 396			timeout = coal_conf->rx_timeout;
 397			event_count = coal_conf->rx_event_count;
 398			if( timeout > MAX_TIMEOUT ||
 399					event_count > MAX_EVENT_COUNT )
 400				return -EINVAL;
 401
 402			timeout = timeout * DELAY_TIMER_CONV;
 403			writel(VAL0|STINTEN, mmio+INTEN0);
 404			writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
 405							mmio+DLY_INT_A);
 406			break;
 407
 408		case TX_INTR_COAL :
 409			timeout = coal_conf->tx_timeout;
 410			event_count = coal_conf->tx_event_count;
 411			if( timeout > MAX_TIMEOUT ||
 412					event_count > MAX_EVENT_COUNT )
 413				return -EINVAL;
 414
 415
 416			timeout = timeout * DELAY_TIMER_CONV;
 417			writel(VAL0|STINTEN,mmio+INTEN0);
 418			writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
 419							 mmio+DLY_INT_B);
 420			break;
 421
 422		case DISABLE_COAL:
 423			writel(0,mmio+STVAL);
 424			writel(STINTEN, mmio+INTEN0);
 425			writel(0, mmio +DLY_INT_B);
 426			writel(0, mmio+DLY_INT_A);
 427			break;
 428		 case ENABLE_COAL:
 429		       /* Start the timer */
 430			writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
 431			writel(VAL0|STINTEN, mmio+INTEN0);
 432			break;
 433		default:
 434			break;
 435
 436   }
 437	return 0;
 438
 439}
 440
 441/*
  442This function initializes the device registers and starts the device.
 443*/
 444static int amd8111e_restart(struct net_device *dev)
 445{
 446	struct amd8111e_priv *lp = netdev_priv(dev);
 447	void __iomem *mmio = lp->mmio;
 448	int i,reg_val;
 449
 450	/* stop the chip */
 451	 writel(RUN, mmio + CMD0);
 452
 453	if(amd8111e_init_ring(dev))
 454		return -ENOMEM;
 455
 456	/* enable the port manager and set auto negotiation always */
 457	writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
 458	writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
 459
 460	amd8111e_set_ext_phy(dev);
 461
 462	/* set control registers */
 463	reg_val = readl(mmio + CTRL1);
 464	reg_val &= ~XMTSP_MASK;
 465	writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
 466
 467	/* enable interrupt */
 468	writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
 469		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
 470		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
 471
 472	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
 473
 474	/* initialize tx and rx ring base addresses */
 475	writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
 476	writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
 477
 478	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
 479	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
 480
 481	/* set default IPG to 96 */
 482	writew((u32)DEFAULT_IPG,mmio+IPG);
 483	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
 484
 485	if(lp->options & OPTION_JUMBO_ENABLE){
 486		writel((u32)VAL2|JUMBO, mmio + CMD3);
 487		/* Reset REX_UFLO */
 488		writel( REX_UFLO, mmio + CMD2);
 489		/* Should not set REX_UFLO for jumbo frames */
 490		writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
 491	}else{
 492		writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
 493		writel((u32)JUMBO, mmio + CMD3);
 494	}
 495
 496#if AMD8111E_VLAN_TAG_USED
 497	writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
 498#endif
 499	writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
 500
 501	/* Setting the MAC address to the device */
 502	for(i = 0; i < ETH_ADDR_LEN; i++)
 503		writeb( dev->dev_addr[i], mmio + PADR + i );
 504
 505	/* Enable interrupt coalesce */
 506	if(lp->options & OPTION_INTR_COAL_ENABLE){
 507		printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
 508								dev->name);
 509		amd8111e_set_coalesce(dev,ENABLE_COAL);
 510	}
 511
 512	/* set RUN bit to start the chip */
 513	writel(VAL2 | RDMD0, mmio + CMD0);
 514	writel(VAL0 | INTREN | RUN, mmio + CMD0);
 515
 516	/* To avoid PCI posting bug */
 517	readl(mmio+CMD0);
 518	return 0;
 519}
 520/*
  521This function clears the necessary device registers.
 522*/
 523static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
 524{
 525	unsigned int reg_val;
 526	unsigned int logic_filter[2] ={0,};
 527	void __iomem *mmio = lp->mmio;
 528
 529
 530        /* stop the chip */
 531	writel(RUN, mmio + CMD0);
 532
 533	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
 534	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
 535
 536	/* Clear RCV_RING_BASE_ADDR */
 537	writel(0, mmio + RCV_RING_BASE_ADDR0);
 538
 539	/* Clear XMT_RING_BASE_ADDR */
 540	writel(0, mmio + XMT_RING_BASE_ADDR0);
 541	writel(0, mmio + XMT_RING_BASE_ADDR1);
 542	writel(0, mmio + XMT_RING_BASE_ADDR2);
 543	writel(0, mmio + XMT_RING_BASE_ADDR3);
 544
 545	/* Clear CMD0  */
 546	writel(CMD0_CLEAR,mmio + CMD0);
 547
 548	/* Clear CMD2 */
 549	writel(CMD2_CLEAR, mmio +CMD2);
 550
 551	/* Clear CMD7 */
 552	writel(CMD7_CLEAR , mmio + CMD7);
 553
 554	/* Clear DLY_INT_A and DLY_INT_B */
 555	writel(0x0, mmio + DLY_INT_A);
 556	writel(0x0, mmio + DLY_INT_B);
 557
 558	/* Clear FLOW_CONTROL */
 559	writel(0x0, mmio + FLOW_CONTROL);
 560
 561	/* Clear INT0  write 1 to clear register */
 562	reg_val = readl(mmio + INT0);
 563	writel(reg_val, mmio + INT0);
 564
 565	/* Clear STVAL */
 566	writel(0x0, mmio + STVAL);
 567
 568	/* Clear INTEN0 */
 569	writel( INTEN0_CLEAR, mmio + INTEN0);
 570
 571	/* Clear LADRF */
 572	writel(0x0 , mmio + LADRF);
 573
 574	/* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
 575	writel( 0x80010,mmio + SRAM_SIZE);
 576
 577	/* Clear RCV_RING0_LEN */
 578	writel(0x0, mmio +  RCV_RING_LEN0);
 579
 580	/* Clear XMT_RING0/1/2/3_LEN */
 581	writel(0x0, mmio +  XMT_RING_LEN0);
 582	writel(0x0, mmio +  XMT_RING_LEN1);
 583	writel(0x0, mmio +  XMT_RING_LEN2);
 584	writel(0x0, mmio +  XMT_RING_LEN3);
 585
 586	/* Clear XMT_RING_LIMIT */
 587	writel(0x0, mmio + XMT_RING_LIMIT);
 588
 589	/* Clear MIB */
 590	writew(MIB_CLEAR, mmio + MIB_ADDR);
 591
 592	/* Clear LARF */
 593	amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
 594
 595	/* SRAM_SIZE register */
 596	reg_val = readl(mmio + SRAM_SIZE);
 597
 598	if(lp->options & OPTION_JUMBO_ENABLE)
 599		writel( VAL2|JUMBO, mmio + CMD3);
 600#if AMD8111E_VLAN_TAG_USED
 601	writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
 602#endif
 603	/* Set default value to CTRL1 Register */
 604	writel(CTRL1_DEFAULT, mmio + CTRL1);
 605
 606	/* To avoid PCI posting bug */
 607	readl(mmio + CMD2);
 608
 609}
 610
 611/*
 612This function disables the interrupt and clears all the pending
 613interrupts in INT0
 614 */
 615static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
 616{
 617	u32 intr0;
 618
 619	/* Disable interrupt */
 620	writel(INTREN, lp->mmio + CMD0);
 621
 622	/* Clear INT0 */
 623	intr0 = readl(lp->mmio + INT0);
 624	writel(intr0, lp->mmio + INT0);
 625
 626	/* To avoid PCI posting bug */
 627	readl(lp->mmio + INT0);
 628
 629}
 630
 631/*
 632This function stops the chip.
 633*/
 634static void amd8111e_stop_chip(struct amd8111e_priv* lp)
 635{
 636	writel(RUN, lp->mmio + CMD0);
 637
 638	/* To avoid PCI posting bug */
 639	readl(lp->mmio + CMD0);
 640}
 641
 642/*
  643This function frees the transmit and receive descriptor rings.
 644*/
 645static void amd8111e_free_ring(struct amd8111e_priv* lp)
 646{
 647	/* Free transmit and receive descriptor rings */
 648	if(lp->rx_ring){
 649		pci_free_consistent(lp->pci_dev,
 650			sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
 651			lp->rx_ring, lp->rx_ring_dma_addr);
 652		lp->rx_ring = NULL;
 653	}
 654
 655	if(lp->tx_ring){
 656		pci_free_consistent(lp->pci_dev,
 657			sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
 658			lp->tx_ring, lp->tx_ring_dma_addr);
 659
 660		lp->tx_ring = NULL;
 661	}
 662
 663}
 664
 665/*
  666This function frees all the transmit skbs that have actually been transmitted by the device. It checks the ownership of each skb before freeing it.
 667*/
 668static int amd8111e_tx(struct net_device *dev)
 669{
 670	struct amd8111e_priv* lp = netdev_priv(dev);
 671	int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
 672	int status;
 673	/* Complete all the transmit packet */
 674	while (lp->tx_complete_idx != lp->tx_idx){
 675		tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
 676		status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
 677
 678		if(status & OWN_BIT)
 679			break;	/* It still hasn't been Txed */
 680
 681		lp->tx_ring[tx_index].buff_phy_addr = 0;
 682
 683		/* We must free the original skb */
 684		if (lp->tx_skbuff[tx_index]) {
 685			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
 686				  	lp->tx_skbuff[tx_index]->len,
 687					PCI_DMA_TODEVICE);
 688			dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
 689			lp->tx_skbuff[tx_index] = NULL;
 690			lp->tx_dma_addr[tx_index] = 0;
 691		}
 692		lp->tx_complete_idx++;
 693		/*COAL update tx coalescing parameters */
 694		lp->coal_conf.tx_packets++;
 695		lp->coal_conf.tx_bytes +=
 696			le16_to_cpu(lp->tx_ring[tx_index].buff_count);
 697
 698		if (netif_queue_stopped(dev) &&
 699			lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
 700			/* The ring is no longer full, clear tbusy. */
 701			/* lp->tx_full = 0; */
 702			netif_wake_queue (dev);
 703		}
 704	}
 705	return 0;
 706}
 707
 708/* This function handles the driver receive operation in polling mode */
 709static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 710{
 711	struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
 712	struct net_device *dev = lp->amd8111e_net_dev;
 713	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
 714	void __iomem *mmio = lp->mmio;
 715	struct sk_buff *skb,*new_skb;
 716	int min_pkt_len, status;
 717	unsigned int intr0;
 718	int num_rx_pkt = 0;
 719	short pkt_len;
 720#if AMD8111E_VLAN_TAG_USED
 721	short vtag;
 722#endif
 723	int rx_pkt_limit = budget;
 724	unsigned long flags;
 725
 726	do{
 727		/* process receive packets until we use the quota*/
 728		/* If we own the next entry, it's a new packet. Send it up. */
 729		while(1) {
 730			status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
 731			if (status & OWN_BIT)
 732				break;
 733
 734			/*
 735			 * There is a tricky error noted by John Murphy,
 736			 * <murf@perftech.com> to Russ Nelson: Even with
  737			 * full-sized buffers it's possible for a
 738			 * jabber packet to use two buffers, with only
 739			 * the last correctly noting the error.
 740			 */
 741
 742			if(status & ERR_BIT) {
  743				/* resetting flags */
 744				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 745				goto err_next_pkt;
 746			}
 747			/* check for STP and ENP */
 748			if(!((status & STP_BIT) && (status & ENP_BIT))){
  749				/* resetting flags */
 750				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 751				goto err_next_pkt;
 752			}
 753			pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 754
 755#if AMD8111E_VLAN_TAG_USED
 756			vtag = status & TT_MASK;
 757			/*MAC will strip vlan tag*/
 758			if (vtag != 0)
 759				min_pkt_len =MIN_PKT_LEN - 4;
 760			else
 761#endif
 762				min_pkt_len =MIN_PKT_LEN;
 763
 764			if (pkt_len < min_pkt_len) {
 765				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 766				lp->drv_rx_errors++;
 767				goto err_next_pkt;
 768			}
 769			if(--rx_pkt_limit < 0)
 770				goto rx_not_empty;
 771			if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
 772				/* if allocation fail,
 773				   ignore that pkt and go to next one */
 774				lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
 775				lp->drv_rx_errors++;
 776				goto err_next_pkt;
 777			}
 778
 779			skb_reserve(new_skb, 2);
 780			skb = lp->rx_skbuff[rx_index];
 781			pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
 782					 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
 783			skb_put(skb, pkt_len);
 784			lp->rx_skbuff[rx_index] = new_skb;
 785			lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
 786								   new_skb->data,
 787								   lp->rx_buff_len-2,
 788								   PCI_DMA_FROMDEVICE);
 789
 790			skb->protocol = eth_type_trans(skb, dev);
 791
 792#if AMD8111E_VLAN_TAG_USED
 793			if (vtag == TT_VLAN_TAGGED){
 794				u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
 795				__vlan_hwaccel_put_tag(skb, vlan_tag);
 796			}
 797#endif
 798			netif_receive_skb(skb);
 799			/*COAL update rx coalescing parameters*/
 800			lp->coal_conf.rx_packets++;
 801			lp->coal_conf.rx_bytes += pkt_len;
 802			num_rx_pkt++;
 803
 804		err_next_pkt:
 805			lp->rx_ring[rx_index].buff_phy_addr
 806				= cpu_to_le32(lp->rx_dma_addr[rx_index]);
 807			lp->rx_ring[rx_index].buff_count =
 808				cpu_to_le16(lp->rx_buff_len-2);
 809			wmb();
 810			lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
 811			rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
 812		}
 813		/* Check the interrupt status register for more packets in the
  814		   meantime. Process them since we have not used up our quota. */
 815
 816		intr0 = readl(mmio + INT0);
 817		/*Ack receive packets */
 818		writel(intr0 & RINT0,mmio + INT0);
 819
 820	} while(intr0 & RINT0);
 821
 822	if (rx_pkt_limit > 0) {
 823		/* Receive descriptor is empty now */
 824		spin_lock_irqsave(&lp->lock, flags);
 825		__napi_complete(napi);
 826		writel(VAL0|RINTEN0, mmio + INTEN0);
 827		writel(VAL2 | RDMD0, mmio + CMD0);
 828		spin_unlock_irqrestore(&lp->lock, flags);
 829	}
 830
 831rx_not_empty:
 832	return num_rx_pkt;
 833}
 834
 835/*
 836This function will indicate the link status to the kernel.
 837*/
 838static int amd8111e_link_change(struct net_device* dev)
 839{
 840	struct amd8111e_priv *lp = netdev_priv(dev);
 841	int status0,speed;
 842
 843	/* read the link change */
 844     	status0 = readl(lp->mmio + STAT0);
 845
 846	if(status0 & LINK_STATS){
 847		if(status0 & AUTONEG_COMPLETE)
 848			lp->link_config.autoneg = AUTONEG_ENABLE;
 849		else
 850			lp->link_config.autoneg = AUTONEG_DISABLE;
 851
 852		if(status0 & FULL_DPLX)
 853			lp->link_config.duplex = DUPLEX_FULL;
 854		else
 855			lp->link_config.duplex = DUPLEX_HALF;
 856		speed = (status0 & SPEED_MASK) >> 7;
 857		if(speed == PHY_SPEED_10)
 858			lp->link_config.speed = SPEED_10;
 859		else if(speed == PHY_SPEED_100)
 860			lp->link_config.speed = SPEED_100;
 861
  862		printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
 863		       (lp->link_config.speed == SPEED_100) ? "100": "10",
 864		       (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
 865		netif_carrier_on(dev);
 866	}
 867	else{
 868		lp->link_config.speed = SPEED_INVALID;
 869		lp->link_config.duplex = DUPLEX_INVALID;
 870		lp->link_config.autoneg = AUTONEG_INVALID;
 871		printk(KERN_INFO "%s: Link is Down.\n",dev->name);
 872		netif_carrier_off(dev);
 873	}
 874
 875	return 0;
 876}
 877/*
 878This function reads the mib counters.
 879*/
 880static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
 881{
 882	unsigned int  status;
 883	unsigned  int data;
 884	unsigned int repeat = REPEAT_CNT;
 885
 886	writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
 887	do {
 888		status = readw(mmio + MIB_ADDR);
 889		udelay(2);	/* controller takes MAX 2 us to get mib data */
 890	}
 891	while (--repeat && (status & MIB_CMD_ACTIVE));
 892
 893	data = readl(mmio + MIB_DATA);
 894	return data;
 895}
 896
 897/*
 898 * This function reads the mib registers and returns the hardware statistics.
 899 * It updates previous internal driver statistics with new values.
 900 */
 901static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
 902{
 903	struct amd8111e_priv *lp = netdev_priv(dev);
 904	void __iomem *mmio = lp->mmio;
 905	unsigned long flags;
 906	struct net_device_stats *new_stats = &dev->stats;
 907
 908	if (!lp->opened)
 909		return new_stats;
 910	spin_lock_irqsave (&lp->lock, flags);
 911
 912	/* stats.rx_packets */
 913	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
 914				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
 915				amd8111e_read_mib(mmio, rcv_unicast_pkts);
 916
 917	/* stats.tx_packets */
 918	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
 919
 920	/*stats.rx_bytes */
 921	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
 922
 923	/* stats.tx_bytes */
 924	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
 925
 926	/* stats.rx_errors */
 927	/* hw errors + errors driver reported */
 928	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
 929				amd8111e_read_mib(mmio, rcv_fragments)+
 930				amd8111e_read_mib(mmio, rcv_jabbers)+
 931				amd8111e_read_mib(mmio, rcv_alignment_errors)+
 932				amd8111e_read_mib(mmio, rcv_fcs_errors)+
 933				amd8111e_read_mib(mmio, rcv_miss_pkts)+
 934				lp->drv_rx_errors;
 935
 936	/* stats.tx_errors */
 937	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
 938
 939	/* stats.rx_dropped*/
 940	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
 941
 942	/* stats.tx_dropped*/
 943	new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
 944
 945	/* stats.multicast*/
 946	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
 947
 948	/* stats.collisions*/
 949	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
 950
 951	/* stats.rx_length_errors*/
 952	new_stats->rx_length_errors =
 953		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
 954		amd8111e_read_mib(mmio, rcv_oversize_pkts);
 955
 956	/* stats.rx_over_errors*/
 957	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 958
 959	/* stats.rx_crc_errors*/
 960	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
 961
 962	/* stats.rx_frame_errors*/
 963	new_stats->rx_frame_errors =
 964		amd8111e_read_mib(mmio, rcv_alignment_errors);
 965
 966	/* stats.rx_fifo_errors */
 967	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 968
 969	/* stats.rx_missed_errors */
 970	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
 971
 972	/* stats.tx_aborted_errors*/
 973	new_stats->tx_aborted_errors =
 974		amd8111e_read_mib(mmio, xmt_excessive_collision);
 975
 976	/* stats.tx_carrier_errors*/
 977	new_stats->tx_carrier_errors =
 978		amd8111e_read_mib(mmio, xmt_loss_carrier);
 979
 980	/* stats.tx_fifo_errors*/
 981	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
 982
 983	/* stats.tx_window_errors*/
 984	new_stats->tx_window_errors =
 985		amd8111e_read_mib(mmio, xmt_late_collision);
 986
 987	/* Reset the mibs for collecting new statistics */
 988	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
 989
 990	spin_unlock_irqrestore (&lp->lock, flags);
 991
 992	return new_stats;
 993}
  994/* This function recalculates the interrupt coalescing mode on every interrupt
  995according to the data rate and the packet rate.
 996*/
 997static int amd8111e_calc_coalesce(struct net_device *dev)
 998{
 999	struct amd8111e_priv *lp = netdev_priv(dev);
1000	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1001	int tx_pkt_rate;
1002	int rx_pkt_rate;
1003	int tx_data_rate;
1004	int rx_data_rate;
1005	int rx_pkt_size;
1006	int tx_pkt_size;
1007
1008	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1009	coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1010
1011	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1012	coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1013
1014	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1015	coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1016
1017	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1018	coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1019
1020	if(rx_pkt_rate < 800){
1021		if(coal_conf->rx_coal_type != NO_COALESCE){
1022
1023			coal_conf->rx_timeout = 0x0;
1024			coal_conf->rx_event_count = 0;
1025			amd8111e_set_coalesce(dev,RX_INTR_COAL);
1026			coal_conf->rx_coal_type = NO_COALESCE;
1027		}
1028	}
1029	else{
1030
1031		rx_pkt_size = rx_data_rate/rx_pkt_rate;
1032		if (rx_pkt_size < 128){
1033			if(coal_conf->rx_coal_type != NO_COALESCE){
1034
1035				coal_conf->rx_timeout = 0;
1036				coal_conf->rx_event_count = 0;
1037				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1038				coal_conf->rx_coal_type = NO_COALESCE;
1039			}
1040
1041		}
1042		else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1043
1044			if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1045				coal_conf->rx_timeout = 1;
1046				coal_conf->rx_event_count = 4;
1047				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1048				coal_conf->rx_coal_type = LOW_COALESCE;
1049			}
1050		}
1051		else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1052
1053			if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1054				coal_conf->rx_timeout = 1;
1055				coal_conf->rx_event_count = 4;
1056				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1057				coal_conf->rx_coal_type = MEDIUM_COALESCE;
1058			}
1059
1060		}
1061		else if(rx_pkt_size >= 1024){
1062			if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1063				coal_conf->rx_timeout = 2;
1064				coal_conf->rx_event_count = 3;
1065				amd8111e_set_coalesce(dev,RX_INTR_COAL);
1066				coal_conf->rx_coal_type = HIGH_COALESCE;
1067			}
1068		}
1069	}
 1070    	/* NOW FOR TX INTR COALESCE */
1071	if(tx_pkt_rate < 800){
1072		if(coal_conf->tx_coal_type != NO_COALESCE){
1073
1074			coal_conf->tx_timeout = 0x0;
1075			coal_conf->tx_event_count = 0;
1076			amd8111e_set_coalesce(dev,TX_INTR_COAL);
1077			coal_conf->tx_coal_type = NO_COALESCE;
1078		}
1079	}
1080	else{
1081
1082		tx_pkt_size = tx_data_rate/tx_pkt_rate;
1083		if (tx_pkt_size < 128){
1084
1085			if(coal_conf->tx_coal_type != NO_COALESCE){
1086
1087				coal_conf->tx_timeout = 0;
1088				coal_conf->tx_event_count = 0;
1089				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1090				coal_conf->tx_coal_type = NO_COALESCE;
1091			}
1092
1093		}
1094		else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1095
1096			if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1097				coal_conf->tx_timeout = 1;
1098				coal_conf->tx_event_count = 2;
1099				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1100				coal_conf->tx_coal_type = LOW_COALESCE;
1101
1102			}
1103		}
1104		else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1105
1106			if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1107				coal_conf->tx_timeout = 2;
1108				coal_conf->tx_event_count = 5;
1109				amd8111e_set_coalesce(dev,TX_INTR_COAL);
1110				coal_conf->tx_coal_type = MEDIUM_COALESCE;
1111			}
1112
1113		}
1114		else if(tx_pkt_size >= 1024){
1115			if (tx_pkt_size >= 1024){
1116				if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1117					coal_conf->tx_timeout = 4;
1118					coal_conf->tx_event_count = 8;
1119					amd8111e_set_coalesce(dev,TX_INTR_COAL);
1120					coal_conf->tx_coal_type = HIGH_COALESCE;
1121				}
1122			}
1123		}
1124	}
1125	return 0;
1126
1127}
1128/*
 1129This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
1130*/
1131static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1132{
1133
1134	struct net_device * dev = (struct net_device *) dev_id;
1135	struct amd8111e_priv *lp = netdev_priv(dev);
1136	void __iomem *mmio = lp->mmio;
1137	unsigned int intr0, intren0;
1138	unsigned int handled = 1;
1139
1140	if(unlikely(dev == NULL))
1141		return IRQ_NONE;
1142
1143	spin_lock(&lp->lock);
1144
1145	/* disabling interrupt */
1146	writel(INTREN, mmio + CMD0);
1147
1148	/* Read interrupt status */
1149	intr0 = readl(mmio + INT0);
1150	intren0 = readl(mmio + INTEN0);
1151
1152	/* Process all the INT event until INTR bit is clear. */
1153
1154	if (!(intr0 & INTR)){
1155		handled = 0;
1156		goto err_no_interrupt;
1157	}
1158
1159	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1160	writel(intr0, mmio + INT0);
1161
1162	/* Check if Receive Interrupt has occurred. */
1163	if (intr0 & RINT0) {
1164		if (napi_schedule_prep(&lp->napi)) {
 1165			/* Disable receive interrupts */
1166			writel(RINTEN0, mmio + INTEN0);
1167			/* Schedule a polling routine */
1168			__napi_schedule(&lp->napi);
1169		} else if (intren0 & RINTEN0) {
1170			printk("************Driver bug! interrupt while in poll\n");
1171			/* Fix by disable receive interrupts */
1172			writel(RINTEN0, mmio + INTEN0);
1173		}
1174	}
1175
1176	/* Check if  Transmit Interrupt has occurred. */
1177	if (intr0 & TINT0)
1178		amd8111e_tx(dev);
1179
1180	/* Check if  Link Change Interrupt has occurred. */
1181	if (intr0 & LCINT)
1182		amd8111e_link_change(dev);
1183
1184	/* Check if Hardware Timer Interrupt has occurred. */
1185	if (intr0 & STINT)
1186		amd8111e_calc_coalesce(dev);
1187
1188err_no_interrupt:
1189	writel( VAL0 | INTREN,mmio + CMD0);
1190
1191	spin_unlock(&lp->lock);
1192
1193	return IRQ_RETVAL(handled);
1194}
1195
1196#ifdef CONFIG_NET_POLL_CONTROLLER
1197static void amd8111e_poll(struct net_device *dev)
1198{
1199	unsigned long flags;
1200	local_irq_save(flags);
1201	amd8111e_interrupt(0, dev);
1202	local_irq_restore(flags);
1203}
1204#endif
1205
1206
1207/*
 1208This function closes the network interface and updates the statistics so that the most recent statistics are available after the interface is down.
1209*/
1210static int amd8111e_close(struct net_device * dev)
1211{
1212	struct amd8111e_priv *lp = netdev_priv(dev);
1213	netif_stop_queue(dev);
1214
1215	napi_disable(&lp->napi);
1216
1217	spin_lock_irq(&lp->lock);
1218
1219	amd8111e_disable_interrupt(lp);
1220	amd8111e_stop_chip(lp);
1221
1222	/* Free transmit and receive skbs */
1223	amd8111e_free_skbs(lp->amd8111e_net_dev);
1224
1225	netif_carrier_off(lp->amd8111e_net_dev);
1226
1227	/* Delete ipg timer */
1228	if(lp->options & OPTION_DYN_IPG_ENABLE)
1229		del_timer_sync(&lp->ipg_data.ipg_timer);
1230
1231	spin_unlock_irq(&lp->lock);
1232	free_irq(dev->irq, dev);
1233	amd8111e_free_ring(lp);
1234
1235	/* Update the statistics before closing */
1236	amd8111e_get_stats(dev);
1237	lp->opened = 0;
1238	return 0;
1239}
 1240/* This function opens a new interface. It requests the irq for the device, initializes the device, buffers and descriptors, and starts the device.
1241*/
1242static int amd8111e_open(struct net_device * dev )
1243{
1244	struct amd8111e_priv *lp = netdev_priv(dev);
1245
1246	if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1247					 dev->name, dev))
1248		return -EAGAIN;
1249
1250	napi_enable(&lp->napi);
1251
1252	spin_lock_irq(&lp->lock);
1253
1254	amd8111e_init_hw_default(lp);
1255
1256	if(amd8111e_restart(dev)){
1257		spin_unlock_irq(&lp->lock);
1258		napi_disable(&lp->napi);
1259		if (dev->irq)
1260			free_irq(dev->irq, dev);
1261		return -ENOMEM;
1262	}
1263	/* Start ipg timer */
1264	if(lp->options & OPTION_DYN_IPG_ENABLE){
1265		add_timer(&lp->ipg_data.ipg_timer);
1266		printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1267	}
1268
1269	lp->opened = 1;
1270
1271	spin_unlock_irq(&lp->lock);
1272
1273	netif_start_queue(dev);
1274
1275	return 0;
1276}
1277/*
 1278This function checks if there are any transmit descriptors available to queue more packets.
1279*/
1280static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1281{
1282	int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1283	if (lp->tx_skbuff[tx_index])
1284		return -1;
1285	else
1286		return 0;
1287
1288}
1289/*
 1290This function queues the transmit packets to the descriptors and triggers the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, ownership to hardware, etc.
1291*/
1292
1293static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1294				       struct net_device * dev)
1295{
1296	struct amd8111e_priv *lp = netdev_priv(dev);
1297	int tx_index;
1298	unsigned long flags;
1299
1300	spin_lock_irqsave(&lp->lock, flags);
1301
1302	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1303
1304	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1305
1306	lp->tx_skbuff[tx_index] = skb;
1307	lp->tx_ring[tx_index].tx_flags = 0;
1308
1309#if AMD8111E_VLAN_TAG_USED
1310	if (vlan_tx_tag_present(skb)) {
1311		lp->tx_ring[tx_index].tag_ctrl_cmd |=
1312				cpu_to_le16(TCC_VLAN_INSERT);
1313		lp->tx_ring[tx_index].tag_ctrl_info =
1314				cpu_to_le16(vlan_tx_tag_get(skb));
1315
1316	}
1317#endif
1318	lp->tx_dma_addr[tx_index] =
1319	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1320	lp->tx_ring[tx_index].buff_phy_addr =
1321	    cpu_to_le32(lp->tx_dma_addr[tx_index]);
1322
1323	/*  Set FCS and LTINT bits */
1324	wmb();
1325	lp->tx_ring[tx_index].tx_flags |=
1326	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1327
1328	lp->tx_idx++;
1329
1330	/* Trigger an immediate send poll. */
1331	writel( VAL1 | TDMD0, lp->mmio + CMD0);
1332	writel( VAL2 | RDMD0,lp->mmio + CMD0);
1333
1334	if(amd8111e_tx_queue_avail(lp) < 0){
1335		netif_stop_queue(dev);
1336	}
1337	spin_unlock_irqrestore(&lp->lock, flags);
1338	return NETDEV_TX_OK;
1339}
1340/*
1341This function returns all the memory mapped registers of the device.
1342*/
1343static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1344{
1345	void __iomem *mmio = lp->mmio;
1346	/* Read only necessary registers */
1347	buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1348	buf[1] = readl(mmio + XMT_RING_LEN0);
1349	buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1350	buf[3] = readl(mmio + RCV_RING_LEN0);
1351	buf[4] = readl(mmio + CMD0);
1352	buf[5] = readl(mmio + CMD2);
1353	buf[6] = readl(mmio + CMD3);
1354	buf[7] = readl(mmio + CMD7);
1355	buf[8] = readl(mmio + INT0);
1356	buf[9] = readl(mmio + INTEN0);
1357	buf[10] = readl(mmio + LADRF);
1358	buf[11] = readl(mmio + LADRF+4);
1359	buf[12] = readl(mmio + STAT0);
1360}
1361
1362
1363/*
 1364This function sets promiscuous mode, all-multi mode or the multicast address
1365list to the device.
1366*/
1367static void amd8111e_set_multicast_list(struct net_device *dev)
1368{
1369	struct netdev_hw_addr *ha;
1370	struct amd8111e_priv *lp = netdev_priv(dev);
1371	u32 mc_filter[2] ;
1372	int bit_num;
1373
1374	if(dev->flags & IFF_PROMISC){
1375		writel( VAL2 | PROM, lp->mmio + CMD2);
1376		return;
1377	}
1378	else
1379		writel( PROM, lp->mmio + CMD2);
1380	if (dev->flags & IFF_ALLMULTI ||
1381	    netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1382		/* get all multicast packet */
1383		mc_filter[1] = mc_filter[0] = 0xffffffff;
1384		lp->options |= OPTION_MULTICAST_ENABLE;
1385		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1386		return;
1387	}
1388	if (netdev_mc_empty(dev)) {
1389		/* get only own packets */
1390		mc_filter[1] = mc_filter[0] = 0;
1391		lp->options &= ~OPTION_MULTICAST_ENABLE;
1392		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1393		/* disable promiscuous mode */
1394		writel(PROM, lp->mmio + CMD2);
1395		return;
1396	}
1397	/* load all the multicast addresses in the logic filter */
1398	lp->options |= OPTION_MULTICAST_ENABLE;
1399	mc_filter[1] = mc_filter[0] = 0;
1400	netdev_for_each_mc_addr(ha, dev) {
1401		bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1402		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1403	}
1404	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1405
1406	/* To eliminate PCI posting bug */
1407	readl(lp->mmio + CMD2);
1408
1409}
1410
1411static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1412{
1413	struct amd8111e_priv *lp = netdev_priv(dev);
1414	struct pci_dev *pci_dev = lp->pci_dev;
1415	strcpy (info->driver, MODULE_NAME);
1416	strcpy (info->version, MODULE_VERS);
1417	sprintf(info->fw_version,"%u",chip_version);
1418	strcpy (info->bus_info, pci_name(pci_dev));
1419}
1420
1421static int amd8111e_get_regs_len(struct net_device *dev)
1422{
1423	return AMD8111E_REG_DUMP_LEN;
1424}
1425
1426static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1427{
1428	struct amd8111e_priv *lp = netdev_priv(dev);
1429	regs->version = 0;
1430	amd8111e_read_regs(lp, buf);
1431}
1432
1433static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1434{
1435	struct amd8111e_priv *lp = netdev_priv(dev);
1436	spin_lock_irq(&lp->lock);
1437	mii_ethtool_gset(&lp->mii_if, ecmd);
1438	spin_unlock_irq(&lp->lock);
1439	return 0;
1440}
1441
1442static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1443{
1444	struct amd8111e_priv *lp = netdev_priv(dev);
1445	int res;
1446	spin_lock_irq(&lp->lock);
1447	res = mii_ethtool_sset(&lp->mii_if, ecmd);
1448	spin_unlock_irq(&lp->lock);
1449	return res;
1450}
1451
1452static int amd8111e_nway_reset(struct net_device *dev)
1453{
1454	struct amd8111e_priv *lp = netdev_priv(dev);
1455	return mii_nway_restart(&lp->mii_if);
1456}
1457
1458static u32 amd8111e_get_link(struct net_device *dev)
1459{
1460	struct amd8111e_priv *lp = netdev_priv(dev);
1461	return mii_link_ok(&lp->mii_if);
1462}
1463
1464static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1465{
1466	struct amd8111e_priv *lp = netdev_priv(dev);
1467	wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1468	if (lp->options & OPTION_WOL_ENABLE)
1469		wol_info->wolopts = WAKE_MAGIC;
1470}
1471
1472static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1473{
1474	struct amd8111e_priv *lp = netdev_priv(dev);
1475	if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1476		return -EINVAL;
1477	spin_lock_irq(&lp->lock);
1478	if (wol_info->wolopts & WAKE_MAGIC)
1479		lp->options |=
1480			(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1481	else if(wol_info->wolopts & WAKE_PHY)
1482		lp->options |=
1483			(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1484	else
1485		lp->options &= ~OPTION_WOL_ENABLE;
1486	spin_unlock_irq(&lp->lock);
1487	return 0;
1488}
1489
1490static const struct ethtool_ops ops = {
1491	.get_drvinfo = amd8111e_get_drvinfo,
1492	.get_regs_len = amd8111e_get_regs_len,
1493	.get_regs = amd8111e_get_regs,
1494	.get_settings = amd8111e_get_settings,
1495	.set_settings = amd8111e_set_settings,
1496	.nway_reset = amd8111e_nway_reset,
1497	.get_link = amd8111e_get_link,
1498	.get_wol = amd8111e_get_wol,
1499	.set_wol = amd8111e_set_wol,
1500};
1501
1502/*
 1503This function handles all the ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory-mapped register values, forces auto-negotiation, and sets/gets WOL options for the ethtool application.
1504*/
1505
1506static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1507{
1508	struct mii_ioctl_data *data = if_mii(ifr);
1509	struct amd8111e_priv *lp = netdev_priv(dev);
1510	int err;
1511	u32 mii_regval;
1512
1513	switch(cmd) {
1514	case SIOCGMIIPHY:
1515		data->phy_id = lp->ext_phy_addr;
1516
1517	/* fallthru */
1518	case SIOCGMIIREG:
1519
1520		spin_lock_irq(&lp->lock);
1521		err = amd8111e_read_phy(lp, data->phy_id,
1522			data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1523		spin_unlock_irq(&lp->lock);
1524
1525		data->val_out = mii_regval;
1526		return err;
1527
1528	case SIOCSMIIREG:
1529
1530		spin_lock_irq(&lp->lock);
1531		err = amd8111e_write_phy(lp, data->phy_id,
1532			data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1533		spin_unlock_irq(&lp->lock);
1534
1535		return err;
1536
1537	default:
1538		/* do nothing */
1539		break;
1540	}
1541	return -EOPNOTSUPP;
1542}
1543static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1544{
1545	struct amd8111e_priv *lp = netdev_priv(dev);
1546	int i;
1547	struct sockaddr *addr = p;
1548
1549	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1550	spin_lock_irq(&lp->lock);
1551	/* Setting the MAC address to the device */
1552	for(i = 0; i < ETH_ADDR_LEN; i++)
1553		writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1554
1555	spin_unlock_irq(&lp->lock);
1556
1557	return 0;
1558}
1559
1560/*
 1561This function changes the mtu of the device. It restarts the device to initialize the descriptors with new receive buffers.
1562*/
1563static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1564{
1565	struct amd8111e_priv *lp = netdev_priv(dev);
1566	int err;
1567
1568	if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1569		return -EINVAL;
1570
1571	if (!netif_running(dev)) {
1572		/* new_mtu will be used
 1573		   when the device starts next time */
1574		dev->mtu = new_mtu;
1575		return 0;
1576	}
1577
1578	spin_lock_irq(&lp->lock);
1579
1580        /* stop the chip */
1581	writel(RUN, lp->mmio + CMD0);
1582
1583	dev->mtu = new_mtu;
1584
1585	err = amd8111e_restart(dev);
1586	spin_unlock_irq(&lp->lock);
1587	if(!err)
1588		netif_start_queue(dev);
1589	return err;
1590}
1591
1592static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1593{
1594	writel( VAL1|MPPLBA, lp->mmio + CMD3);
1595	writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1596
1597	/* To eliminate PCI posting bug */
1598	readl(lp->mmio + CMD7);
1599	return 0;
1600}
1601
1602static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1603{
1604
 1605	/* Adapter is already stopped/suspended/interrupt-disabled */
1606	writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1607
1608	/* To eliminate PCI posting bug */
1609	readl(lp->mmio + CMD7);
1610	return 0;
1611}
1612
1613/*
1614 * This function is called when a packet transmission fails to complete
 1615 * within a reasonable period, on the assumption that an interrupt has
1616 * failed or the interface is locked up. This function will reinitialize
1617 * the hardware.
1618 */
1619static void amd8111e_tx_timeout(struct net_device *dev)
1620{
1621	struct amd8111e_priv* lp = netdev_priv(dev);
1622	int err;
1623
1624	printk(KERN_ERR "%s: transmit timed out, resetting\n",
1625	 					      dev->name);
1626	spin_lock_irq(&lp->lock);
1627	err = amd8111e_restart(dev);
1628	spin_unlock_irq(&lp->lock);
1629	if(!err)
1630		netif_wake_queue(dev);
1631}
1632static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1633{
1634	struct net_device *dev = pci_get_drvdata(pci_dev);
1635	struct amd8111e_priv *lp = netdev_priv(dev);
1636
1637	if (!netif_running(dev))
1638		return 0;
1639
1640	/* disable the interrupt */
1641	spin_lock_irq(&lp->lock);
1642	amd8111e_disable_interrupt(lp);
1643	spin_unlock_irq(&lp->lock);
1644
1645	netif_device_detach(dev);
1646
1647	/* stop chip */
1648	spin_lock_irq(&lp->lock);
1649	if(lp->options & OPTION_DYN_IPG_ENABLE)
1650		del_timer_sync(&lp->ipg_data.ipg_timer);
1651	amd8111e_stop_chip(lp);
1652	spin_unlock_irq(&lp->lock);
1653
1654	if(lp->options & OPTION_WOL_ENABLE){
1655		 /* enable wol */
1656		if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1657			amd8111e_enable_magicpkt(lp);
1658		if(lp->options & OPTION_WAKE_PHY_ENABLE)
1659			amd8111e_enable_link_change(lp);
1660
1661		pci_enable_wake(pci_dev, PCI_D3hot, 1);
1662		pci_enable_wake(pci_dev, PCI_D3cold, 1);
1663
1664	}
1665	else{
1666		pci_enable_wake(pci_dev, PCI_D3hot, 0);
1667		pci_enable_wake(pci_dev, PCI_D3cold, 0);
1668	}
1669
1670	pci_save_state(pci_dev);
1671	pci_set_power_state(pci_dev, PCI_D3hot);
1672
1673	return 0;
1674}
1675static int amd8111e_resume(struct pci_dev *pci_dev)
1676{
1677	struct net_device *dev = pci_get_drvdata(pci_dev);
1678	struct amd8111e_priv *lp = netdev_priv(dev);
1679
1680	if (!netif_running(dev))
1681		return 0;
1682
1683	pci_set_power_state(pci_dev, PCI_D0);
1684	pci_restore_state(pci_dev);
1685
1686	pci_enable_wake(pci_dev, PCI_D3hot, 0);
1687	pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1688
1689	netif_device_attach(dev);
1690
1691	spin_lock_irq(&lp->lock);
1692	amd8111e_restart(dev);
1693	/* Restart ipg timer */
1694	if(lp->options & OPTION_DYN_IPG_ENABLE)
1695		mod_timer(&lp->ipg_data.ipg_timer,
1696				jiffies + IPG_CONVERGE_JIFFIES);
1697	spin_unlock_irq(&lp->lock);
1698
1699	return 0;
1700}
1701
1702
1703static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1704{
1705	struct net_device *dev = pci_get_drvdata(pdev);
1706	if (dev) {
1707		unregister_netdev(dev);
1708		iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1709		free_netdev(dev);
1710		pci_release_regions(pdev);
1711		pci_disable_device(pdev);
1712		pci_set_drvdata(pdev, NULL);
1713	}
1714}
1715static void amd8111e_config_ipg(struct net_device* dev)
1716{
1717	struct amd8111e_priv *lp = netdev_priv(dev);
1718	struct ipg_info* ipg_data = &lp->ipg_data;
1719	void __iomem *mmio = lp->mmio;
1720	unsigned int prev_col_cnt = ipg_data->col_cnt;
1721	unsigned int total_col_cnt;
1722	unsigned int tmp_ipg;
1723
1724	if(lp->link_config.duplex == DUPLEX_FULL){
1725		ipg_data->ipg = DEFAULT_IPG;
1726		return;
1727	}
1728
1729	if(ipg_data->ipg_state == SSTATE){
1730
1731		if(ipg_data->timer_tick == IPG_STABLE_TIME){
1732
1733			ipg_data->timer_tick = 0;
1734			ipg_data->ipg = MIN_IPG - IPG_STEP;
1735			ipg_data->current_ipg = MIN_IPG;
1736			ipg_data->diff_col_cnt = 0xFFFFFFFF;
1737			ipg_data->ipg_state = CSTATE;
1738		}
1739		else
1740			ipg_data->timer_tick++;
1741	}
1742
1743	if(ipg_data->ipg_state == CSTATE){
1744
1745		/* Get the current collision count */
1746
1747		total_col_cnt = ipg_data->col_cnt =
1748				amd8111e_read_mib(mmio, xmt_collisions);
1749
1750		if ((total_col_cnt - prev_col_cnt) <
1751				(ipg_data->diff_col_cnt)){
1752
1753			ipg_data->diff_col_cnt =
1754				total_col_cnt - prev_col_cnt ;
1755
1756			ipg_data->ipg = ipg_data->current_ipg;
1757		}
1758
1759		ipg_data->current_ipg += IPG_STEP;
1760
1761		if (ipg_data->current_ipg <= MAX_IPG)
1762			tmp_ipg = ipg_data->current_ipg;
1763		else{
1764			tmp_ipg = ipg_data->ipg;
1765			ipg_data->ipg_state = SSTATE;
1766		}
1767		writew((u32)tmp_ipg, mmio + IPG);
1768		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1769	}
1770	 mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1771	return;
1772
1773}
1774
1775static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
1776{
1777	struct amd8111e_priv *lp = netdev_priv(dev);
1778	int i;
1779
1780	for (i = 0x1e; i >= 0; i--) {
1781		u32 id1, id2;
1782
1783		if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1784			continue;
1785		if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1786			continue;
1787		lp->ext_phy_id = (id1 << 16) | id2;
1788		lp->ext_phy_addr = i;
1789		return;
1790	}
1791	lp->ext_phy_id = 0;
1792	lp->ext_phy_addr = 1;
1793}
1794
1795static const struct net_device_ops amd8111e_netdev_ops = {
1796	.ndo_open		= amd8111e_open,
1797	.ndo_stop		= amd8111e_close,
1798	.ndo_start_xmit		= amd8111e_start_xmit,
1799	.ndo_tx_timeout		= amd8111e_tx_timeout,
1800	.ndo_get_stats		= amd8111e_get_stats,
1801	.ndo_set_multicast_list = amd8111e_set_multicast_list,
1802	.ndo_validate_addr	= eth_validate_addr,
1803	.ndo_set_mac_address	= amd8111e_set_mac_address,
1804	.ndo_do_ioctl		= amd8111e_ioctl,
1805	.ndo_change_mtu		= amd8111e_change_mtu,
1806#ifdef CONFIG_NET_POLL_CONTROLLER
1807	.ndo_poll_controller	 = amd8111e_poll,
1808#endif
1809};
1810
1811static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1812				  const struct pci_device_id *ent)
1813{
1814	int err,i,pm_cap;
1815	unsigned long reg_addr,reg_len;
1816	struct amd8111e_priv* lp;
1817	struct net_device* dev;
1818
1819	err = pci_enable_device(pdev);
1820	if(err){
1821		printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1822			"exiting.\n");
1823		return err;
1824	}
1825
1826	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1827		printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1828		       "exiting.\n");
1829		err = -ENODEV;
1830		goto err_disable_pdev;
1831	}
1832
1833	err = pci_request_regions(pdev, MODULE_NAME);
1834	if(err){
1835		printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1836		       "exiting.\n");
1837		goto err_disable_pdev;
1838	}
1839
1840	pci_set_master(pdev);
1841
1842	/* Find power-management capability. */
1843	if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
1844		printk(KERN_ERR "amd8111e: No Power Management capability, "
1845		       "exiting.\n");
1846		goto err_free_reg;
1847	}
1848
1849	/* Initialize DMA */
1850	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1851		printk(KERN_ERR "amd8111e: DMA not supported,"
1852			"exiting.\n");
1853		goto err_free_reg;
1854	}
1855
1856	reg_addr = pci_resource_start(pdev, 0);
1857	reg_len = pci_resource_len(pdev, 0);
1858
1859	dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1860	if (!dev) {
1861		printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1862		err = -ENOMEM;
1863		goto err_free_reg;
1864	}
1865
1866	SET_NETDEV_DEV(dev, &pdev->dev);
1867
1868#if AMD8111E_VLAN_TAG_USED
1869	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1870#endif
1871
1872	lp = netdev_priv(dev);
1873	lp->pci_dev = pdev;
1874	lp->amd8111e_net_dev = dev;
1875	lp->pm_cap = pm_cap;
1876
1877	spin_lock_init(&lp->lock);
1878
1879	lp->mmio = ioremap(reg_addr, reg_len);
1880	if (!lp->mmio) {
1881		printk(KERN_ERR "amd8111e: Cannot map device registers, "
1882		       "exiting\n");
1883		err = -ENOMEM;
1884		goto err_free_dev;
1885	}
1886
1887	/* Initializing MAC address */
1888	for(i = 0; i < ETH_ADDR_LEN; i++)
1889		dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1890
 1891	/* Setting user defined parameters */
1892	lp->ext_phy_option = speed_duplex[card_idx];
1893	if(coalesce[card_idx])
1894		lp->options |= OPTION_INTR_COAL_ENABLE;
1895	if(dynamic_ipg[card_idx++])
1896		lp->options |= OPTION_DYN_IPG_ENABLE;
1897
1898
1899	/* Initialize driver entry points */
1900	dev->netdev_ops = &amd8111e_netdev_ops;
1901	SET_ETHTOOL_OPS(dev, &ops);
1902	dev->irq =pdev->irq;
1903	dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1904	netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1905
1906#if AMD8111E_VLAN_TAG_USED
1907	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1908#endif
1909	/* Probe the external PHY */
1910	amd8111e_probe_ext_phy(dev);
1911
1912	/* setting mii default values */
1913	lp->mii_if.dev = dev;
1914	lp->mii_if.mdio_read = amd8111e_mdio_read;
1915	lp->mii_if.mdio_write = amd8111e_mdio_write;
1916	lp->mii_if.phy_id = lp->ext_phy_addr;
1917
1918	/* Set receive buffer length and set jumbo option*/
1919	amd8111e_set_rx_buff_len(dev);
1920
1921
1922	err = register_netdev(dev);
1923	if (err) {
1924		printk(KERN_ERR "amd8111e: Cannot register net device, "
1925		       "exiting.\n");
1926		goto err_iounmap;
1927	}
1928
1929	pci_set_drvdata(pdev, dev);
1930
1931	/* Initialize software ipg timer */
1932	if(lp->options & OPTION_DYN_IPG_ENABLE){
1933		init_timer(&lp->ipg_data.ipg_timer);
1934		lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1935		lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1936		lp->ipg_data.ipg_timer.expires = jiffies +
1937						 IPG_CONVERGE_JIFFIES;
1938		lp->ipg_data.ipg = DEFAULT_IPG;
1939		lp->ipg_data.ipg_state = CSTATE;
1940	}
1941
1942	/*  display driver and device information */
1943
1944    	chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1945	printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1946	       dev->name,MODULE_VERS);
1947	printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1948	       dev->name, chip_version, dev->dev_addr);
1949	if (lp->ext_phy_id)
1950		printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1951		       dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1952	else
1953		printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1954		       dev->name);
1955    	return 0;
1956err_iounmap:
1957	iounmap(lp->mmio);
1958
1959err_free_dev:
1960	free_netdev(dev);
1961
1962err_free_reg:
1963	pci_release_regions(pdev);
1964
1965err_disable_pdev:
1966	pci_disable_device(pdev);
1967	pci_set_drvdata(pdev, NULL);
1968	return err;
1969
1970}
1971
1972static struct pci_driver amd8111e_driver = {
1973	.name   	= MODULE_NAME,
1974	.id_table	= amd8111e_pci_tbl,
1975	.probe		= amd8111e_probe_one,
1976	.remove		= __devexit_p(amd8111e_remove_one),
1977	.suspend	= amd8111e_suspend,
1978	.resume		= amd8111e_resume
1979};
1980
1981static int __init amd8111e_init(void)
1982{
1983	return pci_register_driver(&amd8111e_driver);
1984}
1985
1986static void __exit amd8111e_cleanup(void)
1987{
1988	pci_unregister_driver(&amd8111e_driver);
1989}
1990
1991module_init(amd8111e_init);
1992module_exit(amd8111e_cleanup);