/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 *      engineered documentation written by Carl-Daniel Hailfinger
 *      and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION		"0.64"
#define DRV_NAME			"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/u64_stats_sync.h>
#include <linux/io.h>

#include <asm/irq.h>

#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ          0x0000001  /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER         0x0000002  /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC          0x0000004  /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA           0x0000008  /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM           0x0000010  /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN               0x0000020  /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI                0x0000040  /* device supports MSI */
#define DEV_HAS_MSI_X              0x0000080  /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL        0x0000100  /* device supports power savings */
#define DEV_HAS_STATISTICS_V1      0x0000200  /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2      0x0000400  /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3      0x0000800  /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12     0x0000600  /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123    0x0000e00  /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED      0x0001000  /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT          0x0002000  /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR    0x0004000  /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX      0x0008000  /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1   0x0010000  /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2   0x0020000  /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3   0x0040000  /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT          0x0080000  /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2         0x0180000  /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE          0x0200000  /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX      0x0400000  /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX     0x0800000  /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX           0x1000000  /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK		0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8200
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL		3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
#define NVREG_XMITCTL_DATA_START	0x00100000
#define NVREG_XMITCTL_DATA_READY	0x00010000
#define NVREG_XMITCTL_DATA_ERROR	0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
#define NVREG_SLOTTIME_10_100_FULL	0x00007f00
#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
#define NVREG_SLOTTIME_HALF		0x0000ff00
#define NVREG_SLOTTIME_DEFAULT		0x00007f00
#define NVREG_SLOTTIME_MASK		0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT		0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100		0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000		0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10	0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100	0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH		0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE		0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE		0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT			0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK		0x000003ff
#define NVREG_BKOFFCTRL_SELECT			24
#define NVREG_BKOFFCTRL_GEAR			12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1	0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2	0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3	0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK_RW		0x0007
#define NVREG_MIISTAT_MASK_ALL		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION	0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION		0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
#define NVREG_POWERSTATE2_PHY_RESET		0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
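/* the XOR above yields LEN_MASK_V1 = 0x0000ffff and LEN_MASK_V2 = 0x00003fff */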

#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)
#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUM_IP	(0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)
#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
#define NV_PCI_REGSZ_MAX	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY	10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		512
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
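
/*
 * DESC_VER_1 and DESC_VER_2 use struct ring_desc; DESC_VER_3 uses the
 * larger struct ring_desc_ex (this is what nv_optimized() below checks).
 */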

/* PHY defines */
#define PHY_OUI_MARVELL		0x5043
#define PHY_OUI_CICADA		0x03f1
#define PHY_OUI_VITESSE		0x01c1
#define PHY_OUI_REALTEK		0x0732
#define PHY_OUI_REALTEK2	0x0020
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_REALTEK_8211		0x0110
#define PHY_REV_MASK			0x0001
#define PHY_REV_REALTEK_8211B		0x0000
#define PHY_REV_REALTEK_8211C		0x0001
#define PHY_MODEL_REALTEK_8201		0x0200
#define PHY_MODEL_MARVELL_E3016		0x0220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT_REG4	0x14
#define PHY_REALTEK_INIT_REG5	0x18
#define PHY_REALTEK_INIT_REG6	0x11
#define PHY_REALTEK_INIT_REG7	0x01
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17
#define PHY_REALTEK_INIT5	0xfb54
#define PHY_REALTEK_INIT6	0xf5c7
#define PHY_REALTEK_INIT7	0x1000
#define PHY_REALTEK_INIT8	0x0003
#define PHY_REALTEK_INIT9	0x0008
#define PHY_REALTEK_INIT10	0x0005
#define PHY_REALTEK_INIT11	0x0200
#define PHY_REALTEK_INIT_MSK1	0x0003

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100	0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE        0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE  0xffffffff

#define NV_RESTART_TX         0x1
#define NV_RESTART_RX         0x2

#define NV_TX_LIMIT_COUNT     16

#define NV_DYNAMIC_THRESHOLD        4
#define NV_DYNAMIC_MAX_QUIET_COUNT  2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets; /* should be ifconfig->rx_packets */
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets; /* should be ifconfig->tx_packets */
	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
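/*
 * These counts follow from struct nv_ethtool_stats: 33 u64 fields in
 * total (V3), minus the 3 version-3 fields = 30 (V2), minus the 6
 * version-2 fields = 24 (V1).
 */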

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 *
 * Hardware stats updates are protected by hwstats_lock:
 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 *   integer wraparound in the NIC stats registers, at low frequency
 *   (0.1 Hz)
 * - updated by nv_get_ethtool_stats + nv_get_stats64
 *
 * Software stats are accessed only through 64b synchronization points
 * and are not subject to other synchronization techniques (single
 * update thread on the TX or RX paths).
 */
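
/*
 * Illustrative sketch only (not a verbatim copy of the driver's TX
 * path): a writer updates the software stats inside a u64_stats_sync
 * section,
 *
 *	u64_stats_update_begin(&np->swstats_tx_syncp);
 *	np->stat_tx_packets++;
 *	np->stat_tx_bytes += skb->len;
 *	u64_stats_update_end(&np->swstats_tx_syncp);
 *
 * while readers loop with u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq(), as nv_get_stats64() does below.
 */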

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* hardware stats are updated in syscall and timer */
	spinlock_t hwstats_lock;
	struct nv_ethtool_stats estats;

	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* RX software stats */
	struct u64_stats_sync swstats_rx_syncp;
	u64 stat_rx_packets;
	u64 stat_rx_bytes; /* not always available in HW */
	u64 stat_rx_missed_errors;
	u64 stat_rx_dropped;

	/* media detection workaround.
	 * Locking: within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* TX software stats */
	struct u64_stats_sync swstats_tx_syncp;
	u64 stat_tx_packets; /* not always available in HW */
	u64 stat_tx_bytes;
	u64 stat_tx_dropped;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];       /* -rx    */
	char name_tx[IFNAMSIZ + 3];       /* -tx    */
	char name_other[IFNAMSIZ + 6];    /* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;

/*
 * Optimization can be either throughput mode or CPU mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
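/*
 * Worked example: for a 1 ms interval, poll_interval
 * = (1000 us * 100) / 2^10 = 100000 / 1024 ~= 97, which matches the
 * NVREG_POLL_DEFAULT=97 note earlier in this file.
 */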

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Debug output control for tx_timeout
 */
static bool debug_tx_timeout = false;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
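
/*
 * flaglen packs the descriptor flags into the high bits (FLAG_MASK_*)
 * and the buffer length into the low bits (LEN_MASK_*), hence the
 * masking in the two helpers above.
 */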

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
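
/*
 * Note: the double shift avoids shifting a 32-bit dma_addr_t right by
 * 32 in a single step, which would be undefined behaviour in C.
 */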

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

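	/*
	 * Multiple irqs are in use only when MSI-X is enabled with more
	 * than one vector; otherwise everything arrives on a single irq.
	 */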
	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
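		/* per the XOR note above, writing back the currently
		 * enabled bits toggles them off */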
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
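
/*
 * Illustrative usage (a hypothetical snippet, using the PHY identifier
 * registers from <linux/mii.h>):
 *
 *	int id1 = mii_rw(dev, np->phyaddr, MII_PHYSID1, MII_READ);
 *	int id2 = mii_rw(dev, np->phyaddr, MII_PHYSID2, MII_READ);
 *
 * A return value of -1 signals a timeout or a failed read.
 */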

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
1617
1618static void nv_txrx_reset(struct net_device *dev)
1619{
1620	struct fe_priv *np = netdev_priv(dev);
1621	u8 __iomem *base = get_hwbase(dev);
1622
1623	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1624	pci_push(base);
1625	udelay(NV_TXRX_RESET_DELAY);
1626	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1627	pci_push(base);
1628}
1629
1630static void nv_mac_reset(struct net_device *dev)
1631{
1632	struct fe_priv *np = netdev_priv(dev);
1633	u8 __iomem *base = get_hwbase(dev);
1634	u32 temp1, temp2, temp3;
1635
1636	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1637	pci_push(base);
1638
1639	/* save registers since they will be cleared on reset */
1640	temp1 = readl(base + NvRegMacAddrA);
1641	temp2 = readl(base + NvRegMacAddrB);
1642	temp3 = readl(base + NvRegTransmitPoll);
1643
1644	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1645	pci_push(base);
1646	udelay(NV_MAC_RESET_DELAY);
1647	writel(0, base + NvRegMacReset);
1648	pci_push(base);
1649	udelay(NV_MAC_RESET_DELAY);
1650
1651	/* restore saved registers */
1652	writel(temp1, base + NvRegMacAddrA);
1653	writel(temp2, base + NvRegMacAddrB);
1654	writel(temp3, base + NvRegTransmitPoll);
1655
1656	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1657	pci_push(base);
1658}
1659
1660/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1661static void nv_update_stats(struct net_device *dev)
1662{
1663	struct fe_priv *np = netdev_priv(dev);
1664	u8 __iomem *base = get_hwbase(dev);
1665
1666	/* If it happens that this is run in top-half context, then
1667	 * replace the spin_lock of hwstats_lock with
1668	 * spin_lock_irqsave() in calling functions. */
1669	WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
1670	assert_spin_locked(&np->hwstats_lock);
1671
1672	/* query hardware */
1673	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1674	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1675	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1676	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1677	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1678	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1679	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1680	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1681	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1682	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1683	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1684	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1685	np->estats.rx_runt += readl(base + NvRegRxRunt);
1686	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1687	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1688	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1689	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1690	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1691	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1692	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1693	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1694	np->estats.rx_packets =
1695		np->estats.rx_unicast +
1696		np->estats.rx_multicast +
1697		np->estats.rx_broadcast;
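	/* note: the frame-alignment term below excludes the rx_extra_byte
	 * events, which are not counted as hard errors */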
1698	np->estats.rx_errors_total =
1699		np->estats.rx_crc_errors +
1700		np->estats.rx_over_errors +
1701		np->estats.rx_frame_error +
1702		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1703		np->estats.rx_late_collision +
1704		np->estats.rx_runt +
1705		np->estats.rx_frame_too_long;
1706	np->estats.tx_errors_total =
1707		np->estats.tx_late_collision +
1708		np->estats.tx_fifo_errors +
1709		np->estats.tx_carrier_errors +
1710		np->estats.tx_excess_deferral +
1711		np->estats.tx_retry_error;
1712
1713	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1714		np->estats.tx_deferral += readl(base + NvRegTxDef);
1715		np->estats.tx_packets += readl(base + NvRegTxFrame);
1716		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1717		np->estats.tx_pause += readl(base + NvRegTxPause);
1718		np->estats.rx_pause += readl(base + NvRegRxPause);
1719		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1720		np->estats.rx_errors_total += np->estats.rx_drop_frame;
1721	}
1722
1723	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1724		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1725		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1726		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1727	}
1728}
1729
1730/*
1731 * nv_get_stats64: dev->ndo_get_stats64 function
1732 * Get the latest stats values from the nic.
1733 * Called with read_lock(&dev_base_lock) held for read -
1734 * only synchronized against unregister_netdevice.
1735 */
1736static struct rtnl_link_stats64*
1737nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1738	__acquires(&netdev_priv(dev)->hwstats_lock)
1739	__releases(&netdev_priv(dev)->hwstats_lock)
1740{
1741	struct fe_priv *np = netdev_priv(dev);
1742	unsigned int syncp_start;
1743
1744	/*
1745	 * Note: because HW stats are not always available and for
1746	 * consistency reasons, the following ifconfig stats are
1747	 * managed by software: rx_bytes, tx_bytes, rx_packets and
1748	 * tx_packets. The related hardware stats reported by ethtool
1749	 * should be equivalent to these ifconfig stats, with 4
1750	 * additional bytes per packet (Ethernet FCS CRC), except for
1751	 * tx_packets when TSO kicks in.
1752	 */
1753
1754	/* software stats */
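	/* seqcount-style retry loop: re-read the snapshot if a writer
	 * updated the counters while we were copying them */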
1755	do {
1756		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1757		storage->rx_packets       = np->stat_rx_packets;
1758		storage->rx_bytes         = np->stat_rx_bytes;
1759		storage->rx_dropped       = np->stat_rx_dropped;
1760		storage->rx_missed_errors = np->stat_rx_missed_errors;
1761	} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1762
1763	do {
1764		syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1765		storage->tx_packets = np->stat_tx_packets;
1766		storage->tx_bytes   = np->stat_tx_bytes;
1767		storage->tx_dropped = np->stat_tx_dropped;
1768	} while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1769
1770	/* If the nic supports hw counters then retrieve latest values */
1771	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1772		spin_lock_bh(&np->hwstats_lock);
1773
1774		nv_update_stats(dev);
1775
1776		/* generic stats */
1777		storage->rx_errors = np->estats.rx_errors_total;
1778		storage->tx_errors = np->estats.tx_errors_total;
1779
1780		/* meaningful only when NIC supports stats v3 */
1781		storage->multicast = np->estats.rx_multicast;
1782
1783		/* detailed rx_errors */
1784		storage->rx_length_errors = np->estats.rx_length_error;
1785		storage->rx_over_errors   = np->estats.rx_over_errors;
1786		storage->rx_crc_errors    = np->estats.rx_crc_errors;
1787		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1788		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1789
1790		/* detailed tx_errors */
1791		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1792		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1793
1794		spin_unlock_bh(&np->hwstats_lock);
1795	}
1796
1797	return storage;
1798}
1799
1800/*
1801 * nv_alloc_rx: fill rx ring entries.
1802 * Return 1 if the skb allocations failed, leaving the
1803 * rx engine without available descriptors.
1804 */
1805static int nv_alloc_rx(struct net_device *dev)
1806{
1807	struct fe_priv *np = netdev_priv(dev);
1808	struct ring_desc *less_rx;
1809
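	/* refill only up to the slot just before get_rx; keeping one
	 * descriptor unused distinguishes a full ring from an empty one */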
1810	less_rx = np->get_rx.orig;
1811	if (less_rx-- == np->first_rx.orig)
1812		less_rx = np->last_rx.orig;
1813
1814	while (np->put_rx.orig != less_rx) {
1815		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1816		if (skb) {
1817			np->put_rx_ctx->skb = skb;
1818			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1819							     skb->data,
1820							     skb_tailroom(skb),
1821							     PCI_DMA_FROMDEVICE);
1822			if (pci_dma_mapping_error(np->pci_dev,
1823						  np->put_rx_ctx->dma)) {
1824				kfree_skb(skb);
1825				goto packet_dropped;
1826			}
1827			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1828			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1829			wmb();
1830			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1831			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1832				np->put_rx.orig = np->first_rx.orig;
1833			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1834				np->put_rx_ctx = np->first_rx_ctx;
1835		} else {
1836packet_dropped:
1837			u64_stats_update_begin(&np->swstats_rx_syncp);
1838			np->stat_rx_dropped++;
1839			u64_stats_update_end(&np->swstats_rx_syncp);
1840			return 1;
1841		}
1842	}
1843	return 0;
1844}
1845
1846static int nv_alloc_rx_optimized(struct net_device *dev)
1847{
1848	struct fe_priv *np = netdev_priv(dev);
1849	struct ring_desc_ex *less_rx;
1850
1851	less_rx = np->get_rx.ex;
1852	if (less_rx-- == np->first_rx.ex)
1853		less_rx = np->last_rx.ex;
1854
1855	while (np->put_rx.ex != less_rx) {
1856		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1857		if (skb) {
1858			np->put_rx_ctx->skb = skb;
1859			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1860							     skb->data,
1861							     skb_tailroom(skb),
1862							     PCI_DMA_FROMDEVICE);
1863			if (pci_dma_mapping_error(np->pci_dev,
1864						  np->put_rx_ctx->dma)) {
1865				kfree_skb(skb);
1866				goto packet_dropped;
1867			}
1868			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1869			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1870			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1871			wmb();
1872			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1873			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1874				np->put_rx.ex = np->first_rx.ex;
1875			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1876				np->put_rx_ctx = np->first_rx_ctx;
1877		} else {
1878packet_dropped:
1879			u64_stats_update_begin(&np->swstats_rx_syncp);
1880			np->stat_rx_dropped++;
1881			u64_stats_update_end(&np->swstats_rx_syncp);
1882			return 1;
1883		}
1884	}
1885	return 0;
1886}
1887
1888/* Called 50 ms after rx buffers were exhausted, to attempt a refill */
1889static void nv_do_rx_refill(unsigned long data)
1890{
1891	struct net_device *dev = (struct net_device *) data;
1892	struct fe_priv *np = netdev_priv(dev);
1893
1894	/* Just reschedule NAPI rx processing */
1895	napi_schedule(&np->napi);
1896}
1897
1898static void nv_init_rx(struct net_device *dev)
1899{
1900	struct fe_priv *np = netdev_priv(dev);
1901	int i;
1902
1903	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1904
1905	if (!nv_optimized(np))
1906		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1907	else
1908		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1909	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1910	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1911
1912	for (i = 0; i < np->rx_ring_size; i++) {
1913		if (!nv_optimized(np)) {
1914			np->rx_ring.orig[i].flaglen = 0;
1915			np->rx_ring.orig[i].buf = 0;
1916		} else {
1917			np->rx_ring.ex[i].flaglen = 0;
1918			np->rx_ring.ex[i].txvlan = 0;
1919			np->rx_ring.ex[i].bufhigh = 0;
1920			np->rx_ring.ex[i].buflow = 0;
1921		}
1922		np->rx_skb[i].skb = NULL;
1923		np->rx_skb[i].dma = 0;
1924	}
1925}
1926
1927static void nv_init_tx(struct net_device *dev)
1928{
1929	struct fe_priv *np = netdev_priv(dev);
1930	int i;
1931
1932	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1933
1934	if (!nv_optimized(np))
1935		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1936	else
1937		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1938	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1939	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1940	netdev_reset_queue(np->dev);
1941	np->tx_pkts_in_progress = 0;
1942	np->tx_change_owner = NULL;
1943	np->tx_end_flip = NULL;
1944	np->tx_stop = 0;
1945
1946	for (i = 0; i < np->tx_ring_size; i++) {
1947		if (!nv_optimized(np)) {
1948			np->tx_ring.orig[i].flaglen = 0;
1949			np->tx_ring.orig[i].buf = 0;
1950		} else {
1951			np->tx_ring.ex[i].flaglen = 0;
1952			np->tx_ring.ex[i].txvlan = 0;
1953			np->tx_ring.ex[i].bufhigh = 0;
1954			np->tx_ring.ex[i].buflow = 0;
1955		}
1956		np->tx_skb[i].skb = NULL;
1957		np->tx_skb[i].dma = 0;
1958		np->tx_skb[i].dma_len = 0;
1959		np->tx_skb[i].dma_single = 0;
1960		np->tx_skb[i].first_tx_desc = NULL;
1961		np->tx_skb[i].next_tx_ctx = NULL;
1962	}
1963}
1964
1965static int nv_init_ring(struct net_device *dev)
1966{
1967	struct fe_priv *np = netdev_priv(dev);
1968
1969	nv_init_tx(dev);
1970	nv_init_rx(dev);
1971
1972	if (!nv_optimized(np))
1973		return nv_alloc_rx(dev);
1974	else
1975		return nv_alloc_rx_optimized(dev);
1976}
1977
1978static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1979{
1980	if (tx_skb->dma) {
1981		if (tx_skb->dma_single)
1982			pci_unmap_single(np->pci_dev, tx_skb->dma,
1983					 tx_skb->dma_len,
1984					 PCI_DMA_TODEVICE);
1985		else
1986			pci_unmap_page(np->pci_dev, tx_skb->dma,
1987				       tx_skb->dma_len,
1988				       PCI_DMA_TODEVICE);
1989		tx_skb->dma = 0;
1990	}
1991}
1992
1993static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1994{
1995	nv_unmap_txskb(np, tx_skb);
1996	if (tx_skb->skb) {
1997		dev_kfree_skb_any(tx_skb->skb);
1998		tx_skb->skb = NULL;
1999		return 1;
2000	}
2001	return 0;
2002}
2003
2004static void nv_drain_tx(struct net_device *dev)
2005{
2006	struct fe_priv *np = netdev_priv(dev);
2007	unsigned int i;
2008
2009	for (i = 0; i < np->tx_ring_size; i++) {
2010		if (!nv_optimized(np)) {
2011			np->tx_ring.orig[i].flaglen = 0;
2012			np->tx_ring.orig[i].buf = 0;
2013		} else {
2014			np->tx_ring.ex[i].flaglen = 0;
2015			np->tx_ring.ex[i].txvlan = 0;
2016			np->tx_ring.ex[i].bufhigh = 0;
2017			np->tx_ring.ex[i].buflow = 0;
2018		}
2019		if (nv_release_txskb(np, &np->tx_skb[i])) {
2020			u64_stats_update_begin(&np->swstats_tx_syncp);
2021			np->stat_tx_dropped++;
2022			u64_stats_update_end(&np->swstats_tx_syncp);
2023		}
2024		np->tx_skb[i].dma = 0;
2025		np->tx_skb[i].dma_len = 0;
2026		np->tx_skb[i].dma_single = 0;
2027		np->tx_skb[i].first_tx_desc = NULL;
2028		np->tx_skb[i].next_tx_ctx = NULL;
2029	}
2030	np->tx_pkts_in_progress = 0;
2031	np->tx_change_owner = NULL;
2032	np->tx_end_flip = NULL;
2033}
2034
2035static void nv_drain_rx(struct net_device *dev)
2036{
2037	struct fe_priv *np = netdev_priv(dev);
2038	int i;
2039
2040	for (i = 0; i < np->rx_ring_size; i++) {
2041		if (!nv_optimized(np)) {
2042			np->rx_ring.orig[i].flaglen = 0;
2043			np->rx_ring.orig[i].buf = 0;
2044		} else {
2045			np->rx_ring.ex[i].flaglen = 0;
2046			np->rx_ring.ex[i].txvlan = 0;
2047			np->rx_ring.ex[i].bufhigh = 0;
2048			np->rx_ring.ex[i].buflow = 0;
2049		}
2050		wmb();
2051		if (np->rx_skb[i].skb) {
2052			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
2053					 (skb_end_pointer(np->rx_skb[i].skb) -
2054					  np->rx_skb[i].skb->data),
2055					 PCI_DMA_FROMDEVICE);
2056			dev_kfree_skb(np->rx_skb[i].skb);
2057			np->rx_skb[i].skb = NULL;
2058		}
2059	}
2060}
2061
2062static void nv_drain_rxtx(struct net_device *dev)
2063{
2064	nv_drain_tx(dev);
2065	nv_drain_rx(dev);
2066}
2067
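/* Number of free tx slots: ring size minus the slots currently in use,
 * where the in-use count is the distance from get_tx_ctx to put_tx_ctx
 * taken modulo the ring size.
 */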
2068static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2069{
2070	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2071}
2072
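/* Reseed the LFSR behind the tx backoff slot time by writing fresh
 * random bits into the NVREG_SLOTTIME_MASK part of the slot time
 * register. The transmitter must be stopped while the seed changes.
 */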
2073static void nv_legacybackoff_reseed(struct net_device *dev)
2074{
2075	u8 __iomem *base = get_hwbase(dev);
2076	u32 reg;
2077	u32 low;
2078	int tx_status = 0;
2079
2080	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2081	get_random_bytes(&low, sizeof(low));
2082	reg |= low & NVREG_SLOTTIME_MASK;
2083
2084	/* Tx must be stopped before the change takes effect.
2085	 * The caller already holds np->lock.
2086	 */
2087	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2088	if (tx_status)
2089		nv_stop_tx(dev);
2090	nv_stop_rx(dev);
2091	writel(reg, base + NvRegSlotTime);
2092	if (tx_status)
2093		nv_start_tx(dev);
2094	nv_start_rx(dev);
2095}
2096
2097/* Gear Backoff Seeds */
2098#define BACKOFF_SEEDSET_ROWS	8
2099#define BACKOFF_SEEDSET_LFSRS	15
2100
2101/* Known Good seed sets */
2102static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2103	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2104	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2105	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2106	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2107	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2108	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2109	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2110	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2111
2112static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2113	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2114	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2115	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2116	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2117	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2118	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2119	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2120	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2121
2122static void nv_gear_backoff_reseed(struct net_device *dev)
2123{
2124	u8 __iomem *base = get_hwbase(dev);
2125	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2126	u32 temp, seedset, combinedSeed;
2127	int i;
2128
2129	/* Set up the seed for the free-running LFSR */
2130	/* Gather three random 12-bit mini-seeds and swizzle their
2131	   bits around to increase randomness */
2132	get_random_bytes(&miniseed1, sizeof(miniseed1));
2133	miniseed1 &= 0x0fff;
2134	if (miniseed1 == 0)
2135		miniseed1 = 0xabc;
2136
2137	get_random_bytes(&miniseed2, sizeof(miniseed2));
2138	miniseed2 &= 0x0fff;
2139	if (miniseed2 == 0)
2140		miniseed2 = 0xabc;
2141	miniseed2_reversed =
2142		((miniseed2 & 0xF00) >> 8) |
2143		 (miniseed2 & 0x0F0) |
2144		 ((miniseed2 & 0x00F) << 8);
2145
2146	get_random_bytes(&miniseed3, sizeof(miniseed3));
2147	miniseed3 &= 0x0fff;
2148	if (miniseed3 == 0)
2149		miniseed3 = 0xabc;
2150	miniseed3_reversed =
2151		((miniseed3 & 0xF00) >> 8) |
2152		 (miniseed3 & 0x0F0) |
2153		 ((miniseed3 & 0x00F) << 8);
2154
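	/* fold the three 12-bit mini-seeds into one 24-bit seed: bits
	 * 23..12 are miniseed1 ^ miniseed2_reversed, bits 11..0 are
	 * miniseed2 ^ miniseed3_reversed */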
2155	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2156		       (miniseed2 ^ miniseed3_reversed);
2157
2158	/* Seeds cannot be zero */
2159	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2160		combinedSeed |= 0x08;
2161	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2162		combinedSeed |= 0x8000;
2163
2164	/* No need to disable tx here */
2165	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2166	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2167	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2168	writel(temp, base + NvRegBackOffControl);
2169
2170	/* Setup seeds for all gear LFSRs. */
2171	get_random_bytes(&seedset, sizeof(seedset));
2172	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2173	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2174		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2175		temp |= main_seedset[seedset][i-1] & 0x3ff;
2176		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2177		writel(temp, base + NvRegBackOffControl);
2178	}
2179}
2180
2181/*
2182 * nv_start_xmit: dev->hard_start_xmit function
2183 * Called with netif_tx_lock held.
2184 */
2185static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2186{
2187	struct fe_priv *np = netdev_priv(dev);
2188	u32 tx_flags = 0;
2189	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2190	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2191	unsigned int i;
2192	u32 offset = 0;
2193	u32 bcnt;
2194	u32 size = skb_headlen(skb);
2195	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2196	u32 empty_slots;
2197	struct ring_desc *put_tx;
2198	struct ring_desc *start_tx;
2199	struct ring_desc *prev_tx;
2200	struct nv_skb_map *prev_tx_ctx;
2201	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2202	unsigned long flags;
2203
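	/* a single descriptor carries at most NV_TX2_TSO_MAX_SIZE bytes,
	 * so the linear area and each fragment may need several ring
	 * entries; count them all before claiming slots */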
2204	/* add fragments to entries count */
2205	for (i = 0; i < fragments; i++) {
2206		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2207
2208		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2209			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2210	}
2211
2212	spin_lock_irqsave(&np->lock, flags);
2213	empty_slots = nv_get_empty_tx_slots(np);
2214	if (unlikely(empty_slots <= entries)) {
2215		netif_stop_queue(dev);
2216		np->tx_stop = 1;
2217		spin_unlock_irqrestore(&np->lock, flags);
2218		return NETDEV_TX_BUSY;
2219	}
2220	spin_unlock_irqrestore(&np->lock, flags);
2221
2222	start_tx = put_tx = np->put_tx.orig;
2223
2224	/* setup the header buffer */
2225	do {
2226		prev_tx = put_tx;
2227		prev_tx_ctx = np->put_tx_ctx;
2228		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2229		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2230						PCI_DMA_TODEVICE);
2231		if (pci_dma_mapping_error(np->pci_dev,
2232					  np->put_tx_ctx->dma)) {
2233			/* on DMA mapping error - drop the packet */
2234			dev_kfree_skb_any(skb);
2235			u64_stats_update_begin(&np->swstats_tx_syncp);
2236			np->stat_tx_dropped++;
2237			u64_stats_update_end(&np->swstats_tx_syncp);
2238			return NETDEV_TX_OK;
2239		}
2240		np->put_tx_ctx->dma_len = bcnt;
2241		np->put_tx_ctx->dma_single = 1;
2242		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2243		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2244
2245		tx_flags = np->tx_flags;
2246		offset += bcnt;
2247		size -= bcnt;
2248		if (unlikely(put_tx++ == np->last_tx.orig))
2249			put_tx = np->first_tx.orig;
2250		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2251			np->put_tx_ctx = np->first_tx_ctx;
2252	} while (size);
2253
2254	/* setup the fragments */
2255	for (i = 0; i < fragments; i++) {
2256		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2257		u32 frag_size = skb_frag_size(frag);
2258		offset = 0;
2259
2260		do {
2261			prev_tx = put_tx;
2262			prev_tx_ctx = np->put_tx_ctx;
2263			if (!start_tx_ctx)
2264				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2265
2266			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2267			np->put_tx_ctx->dma = skb_frag_dma_map(
2268							&np->pci_dev->dev,
2269							frag, offset,
2270							bcnt,
2271							DMA_TO_DEVICE);
2272			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2273
2274				/* Unwind the mapped fragments */
2275				do {
2276					nv_unmap_txskb(np, start_tx_ctx);
2277					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2278						tmp_tx_ctx = np->first_tx_ctx;
2279				} while (tmp_tx_ctx != np->put_tx_ctx);
2280				dev_kfree_skb_any(skb);
2281				np->put_tx_ctx = start_tx_ctx;
2282				u64_stats_update_begin(&np->swstats_tx_syncp);
2283				np->stat_tx_dropped++;
2284				u64_stats_update_end(&np->swstats_tx_syncp);
2285				return NETDEV_TX_OK;
2286			}
2287
2288			np->put_tx_ctx->dma_len = bcnt;
2289			np->put_tx_ctx->dma_single = 0;
2290			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2291			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2292
2293			offset += bcnt;
2294			frag_size -= bcnt;
2295			if (unlikely(put_tx++ == np->last_tx.orig))
2296				put_tx = np->first_tx.orig;
2297			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2298				np->put_tx_ctx = np->first_tx_ctx;
2299		} while (frag_size);
2300	}
2301
2302	/* set last fragment flag  */
2303	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2304
2305	/* save skb in this slot's context area */
2306	prev_tx_ctx->skb = skb;
2307
2308	if (skb_is_gso(skb))
2309		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2310	else
2311		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2312			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2313
2314	spin_lock_irqsave(&np->lock, flags);
2315
2316	/* set tx flags */
2317	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2318
2319	netdev_sent_queue(np->dev, skb->len);
2320
2321	skb_tx_timestamp(skb);
2322
2323	np->put_tx.orig = put_tx;
2324
2325	spin_unlock_irqrestore(&np->lock, flags);
2326
2327	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2328	return NETDEV_TX_OK;
2329}
2330
2331static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2332					   struct net_device *dev)
2333{
2334	struct fe_priv *np = netdev_priv(dev);
2335	u32 tx_flags = 0;
2336	u32 tx_flags_extra;
2337	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2338	unsigned int i;
2339	u32 offset = 0;
2340	u32 bcnt;
2341	u32 size = skb_headlen(skb);
2342	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2343	u32 empty_slots;
2344	struct ring_desc_ex *put_tx;
2345	struct ring_desc_ex *start_tx;
2346	struct ring_desc_ex *prev_tx;
2347	struct nv_skb_map *prev_tx_ctx;
2348	struct nv_skb_map *start_tx_ctx = NULL;
2349	struct nv_skb_map *tmp_tx_ctx = NULL;
2350	unsigned long flags;
2351
2352	/* add fragments to entries count */
2353	for (i = 0; i < fragments; i++) {
2354		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2355
2356		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2357			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2358	}
2359
2360	spin_lock_irqsave(&np->lock, flags);
2361	empty_slots = nv_get_empty_tx_slots(np);
2362	if (unlikely(empty_slots <= entries)) {
2363		netif_stop_queue(dev);
2364		np->tx_stop = 1;
2365		spin_unlock_irqrestore(&np->lock, flags);
2366		return NETDEV_TX_BUSY;
2367	}
2368	spin_unlock_irqrestore(&np->lock, flags);
2369
2370	start_tx = put_tx = np->put_tx.ex;
2371	start_tx_ctx = np->put_tx_ctx;
2372
2373	/* setup the header buffer */
2374	do {
2375		prev_tx = put_tx;
2376		prev_tx_ctx = np->put_tx_ctx;
2377		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2378		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2379						PCI_DMA_TODEVICE);
2380		if (pci_dma_mapping_error(np->pci_dev,
2381					  np->put_tx_ctx->dma)) {
2382			/* on DMA mapping error - drop the packet */
2383			dev_kfree_skb_any(skb);
2384			u64_stats_update_begin(&np->swstats_tx_syncp);
2385			np->stat_tx_dropped++;
2386			u64_stats_update_end(&np->swstats_tx_syncp);
2387			return NETDEV_TX_OK;
2388		}
2389		np->put_tx_ctx->dma_len = bcnt;
2390		np->put_tx_ctx->dma_single = 1;
2391		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2392		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2393		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2394
2395		tx_flags = NV_TX2_VALID;
2396		offset += bcnt;
2397		size -= bcnt;
2398		if (unlikely(put_tx++ == np->last_tx.ex))
2399			put_tx = np->first_tx.ex;
2400		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2401			np->put_tx_ctx = np->first_tx_ctx;
2402	} while (size);
2403
2404	/* setup the fragments */
2405	for (i = 0; i < fragments; i++) {
2406		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2407		u32 frag_size = skb_frag_size(frag);
2408		offset = 0;
2409
2410		do {
2411			prev_tx = put_tx;
2412			prev_tx_ctx = np->put_tx_ctx;
2413			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2414			if (!start_tx_ctx)
2415				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2416			np->put_tx_ctx->dma = skb_frag_dma_map(
2417							&np->pci_dev->dev,
2418							frag, offset,
2419							bcnt,
2420							DMA_TO_DEVICE);
2421
2422			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
2423
2424				/* Unwind the mapped fragments */
2425				do {
2426					nv_unmap_txskb(np, start_tx_ctx);
2427					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2428						tmp_tx_ctx = np->first_tx_ctx;
2429				} while (tmp_tx_ctx != np->put_tx_ctx);
2430				dev_kfree_skb_any(skb);
2431				np->put_tx_ctx = start_tx_ctx;
2432				u64_stats_update_begin(&np->swstats_tx_syncp);
2433				np->stat_tx_dropped++;
2434				u64_stats_update_end(&np->swstats_tx_syncp);
2435				return NETDEV_TX_OK;
2436			}
2437			np->put_tx_ctx->dma_len = bcnt;
2438			np->put_tx_ctx->dma_single = 0;
2439			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2440			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2441			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2442
2443			offset += bcnt;
2444			frag_size -= bcnt;
2445			if (unlikely(put_tx++ == np->last_tx.ex))
2446				put_tx = np->first_tx.ex;
2447			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2448				np->put_tx_ctx = np->first_tx_ctx;
2449		} while (frag_size);
2450	}
2451
2452	/* set last fragment flag  */
2453	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2454
2455	/* save skb in this slot's context area */
2456	prev_tx_ctx->skb = skb;
2457
2458	if (skb_is_gso(skb))
2459		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2460	else
2461		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2462			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2463
2464	/* vlan tag */
2465	if (vlan_tx_tag_present(skb))
2466		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2467					vlan_tx_tag_get(skb));
2468	else
2469		start_tx->txvlan = 0;
2470
2471	spin_lock_irqsave(&np->lock, flags);
2472
2473	if (np->tx_limit) {
2474		/* Limit the number of outstanding tx. Set up all fragments, but
2475		 * do not set the VALID bit on the first descriptor. Save a pointer
2476		 * to that descriptor and to the next skb_map element.
2477		 */
2478
2479		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2480			if (!np->tx_change_owner)
2481				np->tx_change_owner = start_tx_ctx;
2482
2483			/* remove VALID bit */
2484			tx_flags &= ~NV_TX2_VALID;
2485			start_tx_ctx->first_tx_desc = start_tx;
2486			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2487			np->tx_end_flip = np->put_tx_ctx;
2488		} else {
2489			np->tx_pkts_in_progress++;
2490		}
2491	}
2492
2493	/* set tx flags */
2494	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2495
2496	netdev_sent_queue(np->dev, skb->len);
2497
2498	skb_tx_timestamp(skb);
2499
2500	np->put_tx.ex = put_tx;
2501
2502	spin_unlock_irqrestore(&np->lock, flags);
2503
2504	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2505	return NETDEV_TX_OK;
2506}
2507
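/* A tx-limited packet has completed: make the next deferred packet
 * visible to the hardware by setting the VALID bit on its first
 * descriptor and kicking the transmitter.
 */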
2508static inline void nv_tx_flip_ownership(struct net_device *dev)
2509{
2510	struct fe_priv *np = netdev_priv(dev);
2511
2512	np->tx_pkts_in_progress--;
2513	if (np->tx_change_owner) {
2514		np->tx_change_owner->first_tx_desc->flaglen |=
2515			cpu_to_le32(NV_TX2_VALID);
2516		np->tx_pkts_in_progress++;
2517
2518		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2519		if (np->tx_change_owner == np->tx_end_flip)
2520			np->tx_change_owner = NULL;
2521
2522		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2523	}
2524}
2525
2526/*
2527 * nv_tx_done: check for completed packets, release the skbs.
2528 *
2529 * Caller must own np->lock.
2530 */
2531static int nv_tx_done(struct net_device *dev, int limit)
2532{
2533	struct fe_priv *np = netdev_priv(dev);
2534	u32 flags;
2535	int tx_work = 0;
2536	struct ring_desc *orig_get_tx = np->get_tx.orig;
2537	unsigned int bytes_compl = 0;
2538
2539	while ((np->get_tx.orig != np->put_tx.orig) &&
2540	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2541	       (tx_work < limit)) {
2542
2543		nv_unmap_txskb(np, np->get_tx_ctx);
2544
2545		if (np->desc_ver == DESC_VER_1) {
2546			if (flags & NV_TX_LASTPACKET) {
2547				if (flags & NV_TX_ERROR) {
2548					if ((flags & NV_TX_RETRYERROR)
2549					    && !(flags & NV_TX_RETRYCOUNT_MASK))
2550						nv_legacybackoff_reseed(dev);
2551				} else {
2552					u64_stats_update_begin(&np->swstats_tx_syncp);
2553					np->stat_tx_packets++;
2554					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2555					u64_stats_update_end(&np->swstats_tx_syncp);
2556				}
2557				bytes_compl += np->get_tx_ctx->skb->len;
2558				dev_kfree_skb_any(np->get_tx_ctx->skb);
2559				np->get_tx_ctx->skb = NULL;
2560				tx_work++;
2561			}
2562		} else {
2563			if (flags & NV_TX2_LASTPACKET) {
2564				if (flags & NV_TX2_ERROR) {
2565					if ((flags & NV_TX2_RETRYERROR)
2566					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
2567						nv_legacybackoff_reseed(dev);
2568				} else {
2569					u64_stats_update_begin(&np->swstats_tx_syncp);
2570					np->stat_tx_packets++;
2571					np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2572					u64_stats_update_end(&np->swstats_tx_syncp);
2573				}
2574				bytes_compl += np->get_tx_ctx->skb->len;
2575				dev_kfree_skb_any(np->get_tx_ctx->skb);
2576				np->get_tx_ctx->skb = NULL;
2577				tx_work++;
2578			}
2579		}
2580		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2581			np->get_tx.orig = np->first_tx.orig;
2582		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2583			np->get_tx_ctx = np->first_tx_ctx;
2584	}
2585
2586	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2587
2588	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2589		np->tx_stop = 0;
2590		netif_wake_queue(dev);
2591	}
2592	return tx_work;
2593}
2594
2595static int nv_tx_done_optimized(struct net_device *dev, int limit)
2596{
2597	struct fe_priv *np = netdev_priv(dev);
2598	u32 flags;
2599	int tx_work = 0;
2600	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2601	unsigned long bytes_cleaned = 0;
2602
2603	while ((np->get_tx.ex != np->put_tx.ex) &&
2604	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2605	       (tx_work < limit)) {
2606
2607		nv_unmap_txskb(np, np->get_tx_ctx);
2608
2609		if (flags & NV_TX2_LASTPACKET) {
2610			if (flags & NV_TX2_ERROR) {
2611				if ((flags & NV_TX2_RETRYERROR)
2612				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2613					if (np->driver_data & DEV_HAS_GEAR_MODE)
2614						nv_gear_backoff_reseed(dev);
2615					else
2616						nv_legacybackoff_reseed(dev);
2617				}
2618			} else {
2619				u64_stats_update_begin(&np->swstats_tx_syncp);
2620				np->stat_tx_packets++;
2621				np->stat_tx_bytes += np->get_tx_ctx->skb->len;
2622				u64_stats_update_end(&np->swstats_tx_syncp);
2623			}
2624
2625			bytes_cleaned += np->get_tx_ctx->skb->len;
2626			dev_kfree_skb_any(np->get_tx_ctx->skb);
2627			np->get_tx_ctx->skb = NULL;
2628			tx_work++;
2629
2630			if (np->tx_limit)
2631				nv_tx_flip_ownership(dev);
2632		}
2633
2634		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2635			np->get_tx.ex = np->first_tx.ex;
2636		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2637			np->get_tx_ctx = np->first_tx_ctx;
2638	}
2639
2640	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2641
2642	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2643		np->tx_stop = 0;
2644		netif_wake_queue(dev);
2645	}
2646	return tx_work;
2647}
2648
2649/*
2650 * nv_tx_timeout: dev->tx_timeout function
2651 * Called with netif_tx_lock held.
2652 */
2653static void nv_tx_timeout(struct net_device *dev)
2654{
2655	struct fe_priv *np = netdev_priv(dev);
2656	u8 __iomem *base = get_hwbase(dev);
2657	u32 status;
2658	union ring_type put_tx;
2659	int saved_tx_limit;
2660
2661	if (np->msi_flags & NV_MSI_X_ENABLED)
2662		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2663	else
2664		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2665
2666	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2667
2668	if (unlikely(debug_tx_timeout)) {
2669		int i;
2670
2671		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2672		netdev_info(dev, "Dumping tx registers\n");
2673		for (i = 0; i <= np->register_size; i += 32) {
2674			netdev_info(dev,
2675				    "%3x: %08x %08x %08x %08x "
2676				    "%08x %08x %08x %08x\n",
2677				    i,
2678				    readl(base + i + 0), readl(base + i + 4),
2679				    readl(base + i + 8), readl(base + i + 12),
2680				    readl(base + i + 16), readl(base + i + 20),
2681				    readl(base + i + 24), readl(base + i + 28));
2682		}
2683		netdev_info(dev, "Dumping tx ring\n");
2684		for (i = 0; i < np->tx_ring_size; i += 4) {
2685			if (!nv_optimized(np)) {
2686				netdev_info(dev,
2687					    "%03x: %08x %08x // %08x %08x "
2688					    "// %08x %08x // %08x %08x\n",
2689					    i,
2690					    le32_to_cpu(np->tx_ring.orig[i].buf),
2691					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2692					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2693					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2694					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2695					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2696					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2697					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2698			} else {
2699				netdev_info(dev,
2700					    "%03x: %08x %08x %08x "
2701					    "// %08x %08x %08x "
2702					    "// %08x %08x %08x "
2703					    "// %08x %08x %08x\n",
2704					    i,
2705					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2706					    le32_to_cpu(np->tx_ring.ex[i].buflow),
2707					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2708					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2709					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2710					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2711					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2712					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2713					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2714					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2715					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2716					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2717			}
2718		}
2719	}
2720
2721	spin_lock_irq(&np->lock);
2722
2723	/* 1) stop tx engine */
2724	nv_stop_tx(dev);
2725
2726	/* 2) complete any outstanding tx and do not hand the HW any tx-limited packets */
2727	saved_tx_limit = np->tx_limit;
2728	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2729	np->tx_stop = 0;  /* prevent waking tx queue */
2730	if (!nv_optimized(np))
2731		nv_tx_done(dev, np->tx_ring_size);
2732	else
2733		nv_tx_done_optimized(dev, np->tx_ring_size);
2734
2735	/* save current HW position */
2736	if (np->tx_change_owner)
2737		put_tx.ex = np->tx_change_owner->first_tx_desc;
2738	else
2739		put_tx = np->put_tx;
2740
2741	/* 3) clear all tx state */
2742	nv_drain_tx(dev);
2743	nv_init_tx(dev);
2744
2745	/* 4) restore state to current HW position */
2746	np->get_tx = np->put_tx = put_tx;
2747	np->tx_limit = saved_tx_limit;
2748
2749	/* 5) restart tx engine */
2750	nv_start_tx(dev);
2751	netif_wake_queue(dev);
2752	spin_unlock_irq(&np->lock);
2753}
2754
2755/*
2756 * Called when the nic notices a mismatch between the actual data len on the
2757 * wire and the len indicated in the 802 header
2758 */
2759static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2760{
2761	int hdrlen;	/* length of the 802 header */
2762	int protolen;	/* length as stored in the proto field */
2763
2764	/* 1) calculate len according to header */
2765	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2766		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2767		hdrlen = VLAN_HLEN;
2768	} else {
2769		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2770		hdrlen = ETH_HLEN;
2771	}
2772	if (protolen > ETH_DATA_LEN)
2773		return datalen; /* Value in proto field not a len, no checks possible */
2774
2775	protolen += hdrlen;
2776	/* consistency checks: */
2777	if (datalen > ETH_ZLEN) {
2778		if (datalen >= protolen) {
2779			/* more data on wire than in 802 header, trim off
2780			 * the additional data.
2781			 */
2782			return protolen;
2783		} else {
2784			/* less data on wire than mentioned in header.
2785			 * Discard the packet.
2786			 */
2787			return -1;
2788		}
2789	} else {
2790		/* short packet. Accept only if 802 values are also short */
2791		if (protolen > ETH_ZLEN) {
2792			return -1;
2793		}
2794		return datalen;
2795	}
2796}
2797
2798static int nv_rx_process(struct net_device *dev, int limit)
2799{
2800	struct fe_priv *np = netdev_priv(dev);
2801	u32 flags;
2802	int rx_work = 0;
2803	struct sk_buff *skb;
2804	int len;
2805
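	/* a descriptor still owned by the NIC has NV_RX_AVAIL set; stop
	 * at the first entry the hardware has not completed yet */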
2806	while ((np->get_rx.orig != np->put_rx.orig) &&
2807	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2808		(rx_work < limit)) {
2809
2810		/*
2811		 * the packet is for us - immediately tear down the pci mapping.
2812		 * TODO: check if a prefetch of the first cacheline improves
2813		 * the performance.
2814		 */
2815		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2816				np->get_rx_ctx->dma_len,
2817				PCI_DMA_FROMDEVICE);
2818		skb = np->get_rx_ctx->skb;
2819		np->get_rx_ctx->skb = NULL;
2820
2821		/* look at what we actually got: */
2822		if (np->desc_ver == DESC_VER_1) {
2823			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2824				len = flags & LEN_MASK_V1;
2825				if (unlikely(flags & NV_RX_ERROR)) {
2826					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2827						len = nv_getlen(dev, skb->data, len);
2828						if (len < 0) {
2829							dev_kfree_skb(skb);
2830							goto next_pkt;
2831						}
2832					}
2833					/* framing errors are soft errors */
2834					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2835						if (flags & NV_RX_SUBSTRACT1)
2836							len--;
2837					}
2838					/* the rest are hard errors */
2839					else {
2840						if (flags & NV_RX_MISSEDFRAME) {
2841							u64_stats_update_begin(&np->swstats_rx_syncp);
2842							np->stat_rx_missed_errors++;
2843							u64_stats_update_end(&np->swstats_rx_syncp);
2844						}
2845						dev_kfree_skb(skb);
2846						goto next_pkt;
2847					}
2848				}
2849			} else {
2850				dev_kfree_skb(skb);
2851				goto next_pkt;
2852			}
2853		} else {
2854			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2855				len = flags & LEN_MASK_V2;
2856				if (unlikely(flags & NV_RX2_ERROR)) {
2857					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2858						len = nv_getlen(dev, skb->data, len);
2859						if (len < 0) {
2860							dev_kfree_skb(skb);
2861							goto next_pkt;
2862						}
2863					}
2864					/* framing errors are soft errors */
2865					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2866						if (flags & NV_RX2_SUBSTRACT1)
2867							len--;
2868					}
2869					/* the rest are hard errors */
2870					else {
2871						dev_kfree_skb(skb);
2872						goto next_pkt;
2873					}
2874				}
2875				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
2876				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
2877					skb->ip_summed = CHECKSUM_UNNECESSARY;
2878			} else {
2879				dev_kfree_skb(skb);
2880				goto next_pkt;
2881			}
2882		}
2883		/* got a valid packet - forward it to the network core */
2884		skb_put(skb, len);
2885		skb->protocol = eth_type_trans(skb, dev);
2886		napi_gro_receive(&np->napi, skb);
2887		u64_stats_update_begin(&np->swstats_rx_syncp);
2888		np->stat_rx_packets++;
2889		np->stat_rx_bytes += len;
2890		u64_stats_update_end(&np->swstats_rx_syncp);
2891next_pkt:
2892		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2893			np->get_rx.orig = np->first_rx.orig;
2894		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2895			np->get_rx_ctx = np->first_rx_ctx;
2896
2897		rx_work++;
2898	}
2899
2900	return rx_work;
2901}
2902
2903static int nv_rx_process_optimized(struct net_device *dev, int limit)
2904{
2905	struct fe_priv *np = netdev_priv(dev);
2906	u32 flags;
2907	u32 vlanflags = 0;
2908	int rx_work = 0;
2909	struct sk_buff *skb;
2910	int len;
2911
2912	while ((np->get_rx.ex != np->put_rx.ex) &&
2913	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2914	      (rx_work < limit)) {
2915
2916		/*
2917		 * the packet is for us - immediately tear down the pci mapping.
2918		 * TODO: check if a prefetch of the first cacheline improves
2919		 * the performance.
2920		 */
2921		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2922				np->get_rx_ctx->dma_len,
2923				PCI_DMA_FROMDEVICE);
2924		skb = np->get_rx_ctx->skb;
2925		np->get_rx_ctx->skb = NULL;
2926
2927		/* look at what we actually got: */
2928		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2929			len = flags & LEN_MASK_V2;
2930			if (unlikely(flags & NV_RX2_ERROR)) {
2931				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2932					len = nv_getlen(dev, skb->data, len);
2933					if (len < 0) {
2934						dev_kfree_skb(skb);
2935						goto next_pkt;
2936					}
2937				}
2938				/* framing errors are soft errors */
2939				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2940					if (flags & NV_RX2_SUBSTRACT1)
2941						len--;
2942				}
2943				/* the rest are hard errors */
2944				else {
2945					dev_kfree_skb(skb);
2946					goto next_pkt;
2947				}
2948			}
2949
2950			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
2951			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /* ip and udp */
2952				skb->ip_summed = CHECKSUM_UNNECESSARY;
2953
2954			/* got a valid packet - forward it to the network core */
2955			skb_put(skb, len);
2956			skb->protocol = eth_type_trans(skb, dev);
2957			prefetch(skb->data);
2958
2959			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2960
2961			/*
2962			 * NETIF_F_HW_VLAN_CTAG_RX must be checked here.
2963			 * Even if vlan rx accel is disabled,
2964			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
2965			 */
2966			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2967			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2968				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2969
2970				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2971			}
2972			napi_gro_receive(&np->napi, skb);
2973			u64_stats_update_begin(&np->swstats_rx_syncp);
2974			np->stat_rx_packets++;
2975			np->stat_rx_bytes += len;
2976			u64_stats_update_end(&np->swstats_rx_syncp);
2977		} else {
2978			dev_kfree_skb(skb);
2979		}
2980next_pkt:
2981		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2982			np->get_rx.ex = np->first_rx.ex;
2983		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2984			np->get_rx_ctx = np->first_rx_ctx;
2985
2986		rx_work++;
2987	}
2988
2989	return rx_work;
2990}
2991
2992static void set_bufsize(struct net_device *dev)
2993{
2994	struct fe_priv *np = netdev_priv(dev);
2995
2996	if (dev->mtu <= ETH_DATA_LEN)
2997		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2998	else
2999		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3000}
3001
3002/*
3003 * nv_change_mtu: dev->change_mtu function
3004 * Called with dev_base_lock held for read.
3005 */
3006static int nv_change_mtu(struct net_device *dev, int new_mtu)
3007{
3008	struct fe_priv *np = netdev_priv(dev);
3009	int old_mtu;
3010
3011	if (new_mtu < 64 || new_mtu > np->pkt_limit)
3012		return -EINVAL;
3013
3014	old_mtu = dev->mtu;
3015	dev->mtu = new_mtu;
3016
3017	/* return early if the buffer sizes will not change */
3018	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3019		return 0;
3020	if (old_mtu == new_mtu)
3021		return 0;
3022
3023	/* synchronized against open : rtnl_lock() held by caller */
3024	if (netif_running(dev)) {
3025		u8 __iomem *base = get_hwbase(dev);
3026		/*
3027		 * It seems that the nic preloads valid ring entries into an
3028		 * internal buffer. The procedure for flushing everything was
3029		 * guessed; there is probably a simpler approach.
3030		 * Changing the MTU is a rare event, so it shouldn't matter.
3031		 */
3032		nv_disable_irq(dev);
3033		nv_napi_disable(dev);
3034		netif_tx_lock_bh(dev);
3035		netif_addr_lock(dev);
3036		spin_lock(&np->lock);
3037		/* stop engines */
3038		nv_stop_rxtx(dev);
3039		nv_txrx_reset(dev);
3040		/* drain rx queue */
3041		nv_drain_rxtx(dev);
3042		/* reinit driver view of the rx queue */
3043		set_bufsize(dev);
3044		if (nv_init_ring(dev)) {
3045			if (!np->in_shutdown)
3046				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3047		}
3048		/* reinit nic view of the rx queue */
3049		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3050		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3051		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3052			base + NvRegRingSizes);
3053		pci_push(base);
3054		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3055		pci_push(base);
3056
3057		/* restart rx engine */
3058		nv_start_rxtx(dev);
3059		spin_unlock(&np->lock);
3060		netif_addr_unlock(dev);
3061		netif_tx_unlock_bh(dev);
3062		nv_napi_enable(dev);
3063		nv_enable_irq(dev);
3064	}
3065	return 0;
3066}
3067
3068static void nv_copy_mac_to_hw(struct net_device *dev)
3069{
3070	u8 __iomem *base = get_hwbase(dev);
3071	u32 mac[2];
3072
3073	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3074			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3075	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3076
3077	writel(mac[0], base + NvRegMacAddrA);
3078	writel(mac[1], base + NvRegMacAddrB);
3079}
3080
3081/*
3082 * nv_set_mac_address: dev->set_mac_address function
3083 * Called with rtnl_lock() held.
3084 */
3085static int nv_set_mac_address(struct net_device *dev, void *addr)
3086{
3087	struct fe_priv *np = netdev_priv(dev);
3088	struct sockaddr *macaddr = (struct sockaddr *)addr;
3089
3090	if (!is_valid_ether_addr(macaddr->sa_data))
3091		return -EADDRNOTAVAIL;
3092
3093	/* synchronized against open : rtnl_lock() held by caller */
3094	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
3095
3096	if (netif_running(dev)) {
3097		netif_tx_lock_bh(dev);
3098		netif_addr_lock(dev);
3099		spin_lock_irq(&np->lock);
3100
3101		/* stop rx engine */
3102		nv_stop_rx(dev);
3103
3104		/* set mac address */
3105		nv_copy_mac_to_hw(dev);
3106
3107		/* restart rx engine */
3108		nv_start_rx(dev);
3109		spin_unlock_irq(&np->lock);
3110		netif_addr_unlock(dev);
3111		netif_tx_unlock_bh(dev);
3112	} else {
3113		nv_copy_mac_to_hw(dev);
3114	}
3115	return 0;
3116}
3117
3118/*
3119 * nv_set_multicast: dev->set_multicast function
3120 * Called with netif_tx_lock held.
3121 */
3122static void nv_set_multicast(struct net_device *dev)
3123{
3124	struct fe_priv *np = netdev_priv(dev);
3125	u8 __iomem *base = get_hwbase(dev);
3126	u32 addr[2];
3127	u32 mask[2];
3128	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3129
3130	memset(addr, 0, sizeof(addr));
3131	memset(mask, 0, sizeof(mask));
3132
3133	if (dev->flags & IFF_PROMISC) {
3134		pff |= NVREG_PFF_PROMISC;
3135	} else {
3136		pff |= NVREG_PFF_MYADDR;
3137
3138		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3139			u32 alwaysOff[2];
3140			u32 alwaysOn[2];
3141
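			/* alwaysOn collects the bits set in every listed
			 * address, alwaysOff the bits clear in every one;
			 * together they form the hw address/mask pair */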
3142			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3143			if (dev->flags & IFF_ALLMULTI) {
3144				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3145			} else {
3146				struct netdev_hw_addr *ha;
3147
3148				netdev_for_each_mc_addr(ha, dev) {
3149					unsigned char *hw_addr = ha->addr;
3150					u32 a, b;
3151
3152					a = le32_to_cpu(*(__le32 *) hw_addr);
3153					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3154					alwaysOn[0] &= a;
3155					alwaysOff[0] &= ~a;
3156					alwaysOn[1] &= b;
3157					alwaysOff[1] &= ~b;
3158				}
3159			}
3160			addr[0] = alwaysOn[0];
3161			addr[1] = alwaysOn[1];
3162			mask[0] = alwaysOn[0] | alwaysOff[0];
3163			mask[1] = alwaysOn[1] | alwaysOff[1];
3164		} else {
3165			mask[0] = NVREG_MCASTMASKA_NONE;
3166			mask[1] = NVREG_MCASTMASKB_NONE;
3167		}
3168	}
3169	addr[0] |= NVREG_MCASTADDRA_FORCE;
3170	pff |= NVREG_PFF_ALWAYS;
3171	spin_lock_irq(&np->lock);
3172	nv_stop_rx(dev);
3173	writel(addr[0], base + NvRegMulticastAddrA);
3174	writel(addr[1], base + NvRegMulticastAddrB);
3175	writel(mask[0], base + NvRegMulticastMaskA);
3176	writel(mask[1], base + NvRegMulticastMaskB);
3177	writel(pff, base + NvRegPacketFilterFlags);
3178	nv_start_rx(dev);
3179	spin_unlock_irq(&np->lock);
3180}
3181
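/* Program rx/tx pause frame handling according to pause_flags, within
 * the limits of what the chip reports itself capable of.
 */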
3182static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3183{
3184	struct fe_priv *np = netdev_priv(dev);
3185	u8 __iomem *base = get_hwbase(dev);
3186
3187	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3188
3189	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3190		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3191		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3192			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3193			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3194		} else {
3195			writel(pff, base + NvRegPacketFilterFlags);
3196		}
3197	}
3198	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3199		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3200		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3201			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3202			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3203				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3204			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3205				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3206				/* limit the number of tx pause frames to a default of 8 */
3207				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3208			}
3209			writel(pause_enable,  base + NvRegTxPauseFrame);
3210			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3211			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3212		} else {
3213			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3214			writel(regmisc, base + NvRegMisc1);
3215		}
3216	}
3217}
3218
3219static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3220{
3221	struct fe_priv *np = netdev_priv(dev);
3222	u8 __iomem *base = get_hwbase(dev);
3223	u32 phyreg, txreg;
3224	int mii_status;
3225
3226	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3227	np->duplex = duplex;
3228
3229	/* see if gigabit phy */
3230	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3231	if (mii_status & PHY_GIGABIT) {
3232		np->gigabit = PHY_GIGABIT;
3233		phyreg = readl(base + NvRegSlotTime);
3234		phyreg &= ~(0x3FF00);
3235		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3236			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3237		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3238			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3239		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3240			phyreg |= NVREG_SLOTTIME_1000_FULL;
3241		writel(phyreg, base + NvRegSlotTime);
3242	}
3243
3244	phyreg = readl(base + NvRegPhyInterface);
3245	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3246	if (np->duplex == 0)
3247		phyreg |= PHY_HALF;
3248	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3249		phyreg |= PHY_100;
3250	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3251							NVREG_LINKSPEED_1000)
3252		phyreg |= PHY_1000;
3253	writel(phyreg, base + NvRegPhyInterface);
3254
3255	if (phyreg & PHY_RGMII) {
3256		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3257							NVREG_LINKSPEED_1000)
3258			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3259		else
3260			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3261	} else {
3262		txreg = NVREG_TX_DEFERRAL_DEFAULT;
3263	}
3264	writel(txreg, base + NvRegTxDeferral);
3265
3266	if (np->desc_ver == DESC_VER_1) {
3267		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3268	} else {
3269		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3270					 NVREG_LINKSPEED_1000)
3271			txreg = NVREG_TX_WM_DESC2_3_1000;
3272		else
3273			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3274	}
3275	writel(txreg, base + NvRegTxWatermark);
3276
3277	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3278			base + NvRegMisc1);
3279	pci_push(base);
3280	writel(np->linkspeed, base + NvRegLinkSpeed);
3281	pci_push(base);
3282
3283	return;
3284}
3285
3286/**
3287 * nv_update_linkspeed - Setup the MAC according to the link partner
3288 * @dev: Network device to be configured
3289 *
3290 * The function queries the PHY and checks if there is a link partner.
3291 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3292 * set to 10 MBit HD.
3293 *
3294 * The function returns 0 if there is no link partner and 1 if there is
3295 * a good link partner.
3296 */
3297static int nv_update_linkspeed(struct net_device *dev)
3298{
3299	struct fe_priv *np = netdev_priv(dev);
3300	u8 __iomem *base = get_hwbase(dev);
3301	int adv = 0;
3302	int lpa = 0;
3303	int adv_lpa, adv_pause, lpa_pause;
3304	int newls = np->linkspeed;
3305	int newdup = np->duplex;
3306	int mii_status;
3307	u32 bmcr;
3308	int retval = 0;
3309	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3310	u32 txrxFlags = 0;
3311	u32 phy_exp;
3312
3313	/* If device loopback is enabled, set carrier on and enable max link
3314	 * speed.
3315	 */
3316	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3317	if (bmcr & BMCR_LOOPBACK) {
3318		if (netif_running(dev)) {
3319			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3320			if (!netif_carrier_ok(dev))
3321				netif_carrier_on(dev);
3322		}
3323		return 1;
3324	}
3325
3326	/* BMSR_LSTATUS is latched, read it twice:
3327	 * we want the current value.
3328	 */
3329	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3330	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3331
3332	if (!(mii_status & BMSR_LSTATUS)) {
3333		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3334		newdup = 0;
3335		retval = 0;
3336		goto set_speed;
3337	}
3338
3339	if (np->autoneg == 0) {
3340		if (np->fixed_mode & LPA_100FULL) {
3341			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3342			newdup = 1;
3343		} else if (np->fixed_mode & LPA_100HALF) {
3344			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3345			newdup = 0;
3346		} else if (np->fixed_mode & LPA_10FULL) {
3347			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3348			newdup = 1;
3349		} else {
3350			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3351			newdup = 0;
3352		}
3353		retval = 1;
3354		goto set_speed;
3355	}
3356	/* check auto negotiation is complete */
3357	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3358		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3359		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3360		newdup = 0;
3361		retval = 0;
3362		goto set_speed;
3363	}
3364
3365	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3366	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3367
3368	retval = 1;
3369	if (np->gigabit == PHY_GIGABIT) {
3370		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3371		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3372
3373		if ((control_1000 & ADVERTISE_1000FULL) &&
3374			(status_1000 & LPA_1000FULL)) {
3375			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3376			newdup = 1;
3377			goto set_speed;
3378		}
3379	}
3380
3381	/* FIXME: handle parallel detection properly */
3382	adv_lpa = lpa & adv;
3383	if (adv_lpa & LPA_100FULL) {
3384		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3385		newdup = 1;
3386	} else if (adv_lpa & LPA_100HALF) {
3387		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3388		newdup = 0;
3389	} else if (adv_lpa & LPA_10FULL) {
3390		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3391		newdup = 1;
3392	} else if (adv_lpa & LPA_10HALF) {
3393		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3394		newdup = 0;
3395	} else {
3396		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3397		newdup = 0;
3398	}
3399
3400set_speed:
3401	if (np->duplex == newdup && np->linkspeed == newls)
3402		return retval;
3403
3404	np->duplex = newdup;
3405	np->linkspeed = newls;
3406
3407	/* The transmitter and receiver must be restarted for safe update */
3408	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3409		txrxFlags |= NV_RESTART_TX;
3410		nv_stop_tx(dev);
3411	}
3412	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3413		txrxFlags |= NV_RESTART_RX;
3414		nv_stop_rx(dev);
3415	}
3416
3417	if (np->gigabit == PHY_GIGABIT) {
3418		phyreg = readl(base + NvRegSlotTime);
3419		phyreg &= ~(0x3FF00);
3420		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3421		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3422			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3423		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3424			phyreg |= NVREG_SLOTTIME_1000_FULL;
3425		writel(phyreg, base + NvRegSlotTime);
3426	}
3427
3428	phyreg = readl(base + NvRegPhyInterface);
3429	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3430	if (np->duplex == 0)
3431		phyreg |= PHY_HALF;
3432	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3433		phyreg |= PHY_100;
3434	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3435		phyreg |= PHY_1000;
3436	writel(phyreg, base + NvRegPhyInterface);
3437
3438	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3439	if (phyreg & PHY_RGMII) {
3440		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3441			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3442		} else {
3443			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3444				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3445					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3446				else
3447					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3448			} else {
3449				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3450			}
3451		}
3452	} else {
3453		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3454			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3455		else
3456			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3457	}
3458	writel(txreg, base + NvRegTxDeferral);
3459
3460	if (np->desc_ver == DESC_VER_1) {
3461		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3462	} else {
3463		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3464			txreg = NVREG_TX_WM_DESC2_3_1000;
3465		else
3466			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3467	}
3468	writel(txreg, base + NvRegTxWatermark);
3469
3470	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3471		base + NvRegMisc1);
3472	pci_push(base);
3473	writel(np->linkspeed, base + NvRegLinkSpeed);
3474	pci_push(base);
3475
3476	pause_flags = 0;
3477	/* setup pause frame */
3478	if (netif_running(dev) && (np->duplex != 0)) {
3479		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3480			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3481			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3482
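			/* Resolve flow control from the advertisements, as in
			 * the 802.3 pause resolution table:
			 * - CAP only: rx pause (and tx pause, if requested)
			 *   when the partner advertises CAP;
			 * - ASYM only: tx pause when the partner advertises
			 *   CAP+ASYM;
			 * - CAP+ASYM: as CAP, plus rx-only pause when the
			 *   partner advertises ASYM alone.
			 */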
3483			switch (adv_pause) {
3484			case ADVERTISE_PAUSE_CAP:
3485				if (lpa_pause & LPA_PAUSE_CAP) {
3486					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3487					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3488						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3489				}
3490				break;
3491			case ADVERTISE_PAUSE_ASYM:
3492				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3493					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3494				break;
3495			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3496				if (lpa_pause & LPA_PAUSE_CAP) {
3497					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3498					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3499						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3500				}
3501				if (lpa_pause == LPA_PAUSE_ASYM)
3502					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3503				break;
3504			}
3505		} else {
3506			pause_flags = np->pause_flags;
3507		}
3508	}
3509	nv_update_pause(dev, pause_flags);
3510
3511	if (txrxFlags & NV_RESTART_TX)
3512		nv_start_tx(dev);
3513	if (txrxFlags & NV_RESTART_RX)
3514		nv_start_rx(dev);
3515
3516	return retval;
3517}
3518
3519static void nv_linkchange(struct net_device *dev)
3520{
3521	if (nv_update_linkspeed(dev)) {
3522		if (!netif_carrier_ok(dev)) {
3523			netif_carrier_on(dev);
3524			netdev_info(dev, "link up\n");
3525			nv_txrx_gate(dev, false);
3526			nv_start_rx(dev);
3527		}
3528	} else {
3529		if (netif_carrier_ok(dev)) {
3530			netif_carrier_off(dev);
3531			netdev_info(dev, "link down\n");
3532			nv_txrx_gate(dev, true);
3533			nv_stop_rx(dev);
3534		}
3535	}
3536}
3537
3538static void nv_link_irq(struct net_device *dev)
3539{
3540	u8 __iomem *base = get_hwbase(dev);
3541	u32 miistat;
3542
3543	miistat = readl(base + NvRegMIIStatus);
3544	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3545
3546	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3547		nv_linkchange(dev);
3548}
3549
3550static void nv_msi_workaround(struct fe_priv *np)
3551{
3552
3553	/* Need to toggle the msi irq mask within the ethernet device,
3554	 * otherwise, future interrupts will not be detected.
3555	 */
3556	if (np->msi_flags & NV_MSI_ENABLED) {
3557		u8 __iomem *base = np->base;
3558
3559		writel(0, base + NvRegMSIIrqMask);
3560		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3561	}
3562}
3563
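/* Dynamic interrupt moderation: a single busy poll (more than
 * NV_DYNAMIC_THRESHOLD units of work) switches immediately to the
 * timer-driven CPU irqmask; the per-packet throughput irqmask is restored
 * only after NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet polls. Returns 1
 * if np->irqmask was changed and must be rewritten to the hardware.
 */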
3564static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3565{
3566	struct fe_priv *np = netdev_priv(dev);
3567
3568	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3569		if (total_work > NV_DYNAMIC_THRESHOLD) {
3570			/* transition to poll based interrupts */
3571			np->quiet_count = 0;
3572			if (np->irqmask != NVREG_IRQMASK_CPU) {
3573				np->irqmask = NVREG_IRQMASK_CPU;
3574				return 1;
3575			}
3576		} else {
3577			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3578				np->quiet_count++;
3579			} else {
3580				/* reached a period of low activity, switch
3581				   to per tx/rx packet interrupts */
3582				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3583					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3584					return 1;
3585				}
3586			}
3587		}
3588	}
3589	return 0;
3590}
3591
3592static irqreturn_t nv_nic_irq(int foo, void *data)
3593{
3594	struct net_device *dev = (struct net_device *) data;
3595	struct fe_priv *np = netdev_priv(dev);
3596	u8 __iomem *base = get_hwbase(dev);
3597
3598	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3599		np->events = readl(base + NvRegIrqStatus);
3600		writel(np->events, base + NvRegIrqStatus);
3601	} else {
3602		np->events = readl(base + NvRegMSIXIrqStatus);
3603		writel(np->events, base + NvRegMSIXIrqStatus);
3604	}
3605	if (!(np->events & np->irqmask))
3606		return IRQ_NONE;
3607
3608	nv_msi_workaround(np);
3609
3610	if (napi_schedule_prep(&np->napi)) {
3611		/*
3612		 * Disable further irqs (msix not enabled with napi)
3613		 */
3614		writel(0, base + NvRegIrqMask);
3615		__napi_schedule(&np->napi);
3616	}
3617
3618	return IRQ_HANDLED;
3619}
3620
3621/* All _optimized functions are used to help increase performance
3622	 * (reduce CPU and increase throughput). They use descriptor version 3,
3623	 * compiler directives, and fewer memory accesses.
3624 */
3625static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3626{
3627	struct net_device *dev = (struct net_device *) data;
3628	struct fe_priv *np = netdev_priv(dev);
3629	u8 __iomem *base = get_hwbase(dev);
3630
3631	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3632		np->events = readl(base + NvRegIrqStatus);
3633		writel(np->events, base + NvRegIrqStatus);
3634	} else {
3635		np->events = readl(base + NvRegMSIXIrqStatus);
3636		writel(np->events, base + NvRegMSIXIrqStatus);
3637	}
3638	if (!(np->events & np->irqmask))
3639		return IRQ_NONE;
3640
3641	nv_msi_workaround(np);
3642
3643	if (napi_schedule_prep(&np->napi)) {
3644		/*
3645		 * Disable further irqs (msix not enabled with napi)
3646		 */
3647		writel(0, base + NvRegIrqMask);
3648		__napi_schedule(&np->napi);
3649	}
3650
3651	return IRQ_HANDLED;
3652}
3653
3654static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3655{
3656	struct net_device *dev = (struct net_device *) data;
3657	struct fe_priv *np = netdev_priv(dev);
3658	u8 __iomem *base = get_hwbase(dev);
3659	u32 events;
3660	int i;
3661	unsigned long flags;
3662
3663	for (i = 0;; i++) {
3664		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3665		writel(events, base + NvRegMSIXIrqStatus);
3666		netdev_dbg(dev, "tx irq events: %08x\n", events);
3667		if (!(events & np->irqmask))
3668			break;
3669
3670		spin_lock_irqsave(&np->lock, flags);
3671		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3672		spin_unlock_irqrestore(&np->lock, flags);
3673
3674		if (unlikely(i > max_interrupt_work)) {
3675			spin_lock_irqsave(&np->lock, flags);
3676			/* disable interrupts on the nic */
3677			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3678			pci_push(base);
3679
3680			if (!np->in_shutdown) {
3681				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3682				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3683			}
3684			spin_unlock_irqrestore(&np->lock, flags);
3685			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3686				   __func__, i);
3687			break;
3688		}
3689
3690	}
3691
3692	return IRQ_RETVAL(i);
3693}
3694
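/* NAPI poll: reap tx completions and process up to @budget rx packets,
 * looping while the rx ring refill succeeds; on refill failure the oom_kick
 * timer takes over. Link change and recoverable-error events flagged in
 * np->events are handled here as well, and interrupts are re-enabled once
 * the poll completes below budget.
 */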
3695static int nv_napi_poll(struct napi_struct *napi, int budget)
3696{
3697	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3698	struct net_device *dev = np->dev;
3699	u8 __iomem *base = get_hwbase(dev);
3700	unsigned long flags;
3701	int retcode;
3702	int rx_count, tx_work = 0, rx_work = 0;
3703
3704	do {
3705		if (!nv_optimized(np)) {
3706			spin_lock_irqsave(&np->lock, flags);
3707			tx_work += nv_tx_done(dev, np->tx_ring_size);
3708			spin_unlock_irqrestore(&np->lock, flags);
3709
3710			rx_count = nv_rx_process(dev, budget - rx_work);
3711			retcode = nv_alloc_rx(dev);
3712		} else {
3713			spin_lock_irqsave(&np->lock, flags);
3714			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3715			spin_unlock_irqrestore(&np->lock, flags);
3716
3717			rx_count = nv_rx_process_optimized(dev,
3718			    budget - rx_work);
3719			retcode = nv_alloc_rx_optimized(dev);
3720		}
3721	} while (retcode == 0 &&
3722		 rx_count > 0 && (rx_work += rx_count) < budget);
3723
3724	if (retcode) {
3725		spin_lock_irqsave(&np->lock, flags);
3726		if (!np->in_shutdown)
3727			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3728		spin_unlock_irqrestore(&np->lock, flags);
3729	}
3730
3731	nv_change_interrupt_mode(dev, tx_work + rx_work);
3732
3733	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3734		spin_lock_irqsave(&np->lock, flags);
3735		nv_link_irq(dev);
3736		spin_unlock_irqrestore(&np->lock, flags);
3737	}
3738	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3739		spin_lock_irqsave(&np->lock, flags);
3740		nv_linkchange(dev);
3741		spin_unlock_irqrestore(&np->lock, flags);
3742		np->link_timeout = jiffies + LINK_TIMEOUT;
3743	}
3744	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3745		spin_lock_irqsave(&np->lock, flags);
3746		if (!np->in_shutdown) {
3747			np->nic_poll_irq = np->irqmask;
3748			np->recover_error = 1;
3749			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3750		}
3751		spin_unlock_irqrestore(&np->lock, flags);
3752		napi_complete(napi);
3753		return rx_work;
3754	}
3755
3756	if (rx_work < budget) {
3757		/* re-enable interrupts
3758		   (msix not enabled in napi) */
3759		napi_complete(napi);
3760
3761		writel(np->irqmask, base + NvRegIrqMask);
3762	}
3763	return rx_work;
3764}
3765
3766static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3767{
3768	struct net_device *dev = (struct net_device *) data;
3769	struct fe_priv *np = netdev_priv(dev);
3770	u8 __iomem *base = get_hwbase(dev);
3771	u32 events;
3772	int i;
3773	unsigned long flags;
3774
3775	for (i = 0;; i++) {
3776		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3777		writel(events, base + NvRegMSIXIrqStatus);
3778		netdev_dbg(dev, "rx irq events: %08x\n", events);
3779		if (!(events & np->irqmask))
3780			break;
3781
3782		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3783			if (unlikely(nv_alloc_rx_optimized(dev))) {
3784				spin_lock_irqsave(&np->lock, flags);
3785				if (!np->in_shutdown)
3786					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3787				spin_unlock_irqrestore(&np->lock, flags);
3788			}
3789		}
3790
3791		if (unlikely(i > max_interrupt_work)) {
3792			spin_lock_irqsave(&np->lock, flags);
3793			/* disable interrupts on the nic */
3794			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3795			pci_push(base);
3796
3797			if (!np->in_shutdown) {
3798				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3799				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3800			}
3801			spin_unlock_irqrestore(&np->lock, flags);
3802			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3803				   __func__, i);
3804			break;
3805		}
3806	}
3807
3808	return IRQ_RETVAL(i);
3809}
3810
3811static irqreturn_t nv_nic_irq_other(int foo, void *data)
3812{
3813	struct net_device *dev = (struct net_device *) data;
3814	struct fe_priv *np = netdev_priv(dev);
3815	u8 __iomem *base = get_hwbase(dev);
3816	u32 events;
3817	int i;
3818	unsigned long flags;
3819
3820	for (i = 0;; i++) {
3821		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3822		writel(events, base + NvRegMSIXIrqStatus);
3823		netdev_dbg(dev, "irq events: %08x\n", events);
3824		if (!(events & np->irqmask))
3825			break;
3826
3827		/* check tx in case we reached max loop limit in tx isr */
3828		spin_lock_irqsave(&np->lock, flags);
3829		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3830		spin_unlock_irqrestore(&np->lock, flags);
3831
3832		if (events & NVREG_IRQ_LINK) {
3833			spin_lock_irqsave(&np->lock, flags);
3834			nv_link_irq(dev);
3835			spin_unlock_irqrestore(&np->lock, flags);
3836		}
3837		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3838			spin_lock_irqsave(&np->lock, flags);
3839			nv_linkchange(dev);
3840			spin_unlock_irqrestore(&np->lock, flags);
3841			np->link_timeout = jiffies + LINK_TIMEOUT;
3842		}
3843		if (events & NVREG_IRQ_RECOVER_ERROR) {
3844			spin_lock_irqsave(&np->lock, flags);
3845			/* disable interrupts on the nic */
3846			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3847			pci_push(base);
3848
3849			if (!np->in_shutdown) {
3850				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3851				np->recover_error = 1;
3852				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3853			}
3854			spin_unlock_irqrestore(&np->lock, flags);
3855			break;
3856		}
3857		if (unlikely(i > max_interrupt_work)) {
3858			spin_lock_irqsave(&np->lock, flags);
3859			/* disable interrupts on the nic */
3860			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3861			pci_push(base);
3862
3863			if (!np->in_shutdown) {
3864				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3865				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3866			}
3867			spin_unlock_irqrestore(&np->lock, flags);
3868			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3869				   __func__, i);
3870			break;
3871		}
3872
3873	}
3874
3875	return IRQ_RETVAL(i);
3876}
3877
3878static irqreturn_t nv_nic_irq_test(int foo, void *data)
3879{
3880	struct net_device *dev = (struct net_device *) data;
3881	struct fe_priv *np = netdev_priv(dev);
3882	u8 __iomem *base = get_hwbase(dev);
3883	u32 events;
3884
3885	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3886		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3887		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3888	} else {
3889		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3890		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3891	}
3892	pci_push(base);
3893	if (!(events & NVREG_IRQ_TIMER))
3894		return IRQ_RETVAL(0);
3895
3896	nv_msi_workaround(np);
3897
3898	spin_lock(&np->lock);
3899	np->intr_test = 1;
3900	spin_unlock(&np->lock);
3901
3902	return IRQ_RETVAL(1);
3903}
3904
3905static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3906{
3907	u8 __iomem *base = get_hwbase(dev);
3908	int i;
3909	u32 msixmap = 0;
3910
3911	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3912	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3913	 * the remaining 8 interrupts.
3914	 */
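	/* Example: mapping vector 2 to interrupt bits 0 and 3 yields
	 * msixmap = (2 << 0) | (2 << 12) = 0x2002 for NvRegMSIXMap0.
	 */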
3915	for (i = 0; i < 8; i++) {
3916		if ((irqmask >> i) & 0x1)
3917			msixmap |= vector << (i << 2);
3918	}
3919	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3920
3921	msixmap = 0;
3922	for (i = 0; i < 8; i++) {
3923		if ((irqmask >> (i + 8)) & 0x1)
3924			msixmap |= vector << (i << 2);
3925	}
3926	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3927}
3928
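/* Set up the device interrupt: try MSI-X first (separate rx/tx/other
 * vectors in throughput mode, a single shared vector otherwise), then fall
 * back to MSI, and finally to the legacy INTx line. Returns 0 on success
 * and 1 on failure.
 */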
3929static int nv_request_irq(struct net_device *dev, int intr_test)
3930{
3931	struct fe_priv *np = get_nvpriv(dev);
3932	u8 __iomem *base = get_hwbase(dev);
3933	int ret;
3934	int i;
3935	irqreturn_t (*handler)(int foo, void *data);
3936
3937	if (intr_test) {
3938		handler = nv_nic_irq_test;
3939	} else {
3940		if (nv_optimized(np))
3941			handler = nv_nic_irq_optimized;
3942		else
3943			handler = nv_nic_irq;
3944	}
3945
3946	if (np->msi_flags & NV_MSI_X_CAPABLE) {
3947		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3948			np->msi_x_entry[i].entry = i;
3949		ret = pci_enable_msix_range(np->pci_dev,
3950					    np->msi_x_entry,
3951					    np->msi_flags & NV_MSI_X_VECTORS_MASK,
3952					    np->msi_flags & NV_MSI_X_VECTORS_MASK);
3953		if (ret > 0) {
3954			np->msi_flags |= NV_MSI_X_ENABLED;
3955			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3956				/* Request irq for rx handling */
3957				sprintf(np->name_rx, "%s-rx", dev->name);
3958				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3959						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3960				if (ret) {
3961					netdev_info(dev,
3962						    "request_irq failed for rx %d\n",
3963						    ret);
3964					pci_disable_msix(np->pci_dev);
3965					np->msi_flags &= ~NV_MSI_X_ENABLED;
3966					goto out_err;
3967				}
3968				/* Request irq for tx handling */
3969				sprintf(np->name_tx, "%s-tx", dev->name);
3970				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3971						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3972				if (ret) {
3973					netdev_info(dev,
3974						    "request_irq failed for tx %d\n",
3975						    ret);
3976					pci_disable_msix(np->pci_dev);
3977					np->msi_flags &= ~NV_MSI_X_ENABLED;
3978					goto out_free_rx;
3979				}
3980				/* Request irq for link and timer handling */
3981				sprintf(np->name_other, "%s-other", dev->name);
3982				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3983						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3984				if (ret) {
3985					netdev_info(dev,
3986						    "request_irq failed for link %d\n",
3987						    ret);
3988					pci_disable_msix(np->pci_dev);
3989					np->msi_flags &= ~NV_MSI_X_ENABLED;
3990					goto out_free_tx;
3991				}
3992				/* map interrupts to their respective vector */
3993				writel(0, base + NvRegMSIXMap0);
3994				writel(0, base + NvRegMSIXMap1);
3995				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3996				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3997				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3998			} else {
3999				/* Request irq for all interrupts */
4000				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4001						  handler, IRQF_SHARED, dev->name, dev);
4002				if (ret) {
4003					netdev_info(dev,
4004						    "request_irq failed %d\n",
4005						    ret);
4006					pci_disable_msix(np->pci_dev);
4007					np->msi_flags &= ~NV_MSI_X_ENABLED;
4008					goto out_err;
4009				}
4010
4011				/* map interrupts to vector 0 */
4012				writel(0, base + NvRegMSIXMap0);
4013				writel(0, base + NvRegMSIXMap1);
4014			}
4015			netdev_info(dev, "MSI-X enabled\n");
4016			return 0;
4017		}
4018	}
4019	if (np->msi_flags & NV_MSI_CAPABLE) {
4020		ret = pci_enable_msi(np->pci_dev);
4021		if (ret == 0) {
4022			np->msi_flags |= NV_MSI_ENABLED;
4023			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4024			if (ret) {
4025				netdev_info(dev, "request_irq failed %d\n",
4026					    ret);
4027				pci_disable_msi(np->pci_dev);
4028				np->msi_flags &= ~NV_MSI_ENABLED;
4029				goto out_err;
4030			}
4031
4032			/* map interrupts to vector 0 */
4033			writel(0, base + NvRegMSIMap0);
4034			writel(0, base + NvRegMSIMap1);
4035			/* enable msi vector 0 */
4036			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4037			netdev_info(dev, "MSI enabled\n");
4038			return 0;
4039		}
4040	}
4041
4042	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4043		goto out_err;
4044
4045	return 0;
4046out_free_tx:
4047	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4048out_free_rx:
4049	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4050out_err:
4051	return 1;
4052}
4053
4054static void nv_free_irq(struct net_device *dev)
4055{
4056	struct fe_priv *np = get_nvpriv(dev);
4057	int i;
4058
4059	if (np->msi_flags & NV_MSI_X_ENABLED) {
4060		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4061			free_irq(np->msi_x_entry[i].vector, dev);
4062		pci_disable_msix(np->pci_dev);
4063		np->msi_flags &= ~NV_MSI_X_ENABLED;
4064	} else {
4065		free_irq(np->pci_dev->irq, dev);
4066		if (np->msi_flags & NV_MSI_ENABLED) {
4067			pci_disable_msi(np->pci_dev);
4068			np->msi_flags &= ~NV_MSI_ENABLED;
4069		}
4070	}
4071}
4072
4073static void nv_do_nic_poll(unsigned long data)
4074{
4075	struct net_device *dev = (struct net_device *) data;
4076	struct fe_priv *np = netdev_priv(dev);
4077	u8 __iomem *base = get_hwbase(dev);
4078	u32 mask = 0;
4079
4080	/*
4081	 * First disable the irq(s) and then
4082	 * reenable interrupts on the nic. We have to do this before calling
4083	 * nv_nic_irq because that may decide to do otherwise.
4084	 */
4085
4086	if (!using_multi_irqs(dev)) {
4087		if (np->msi_flags & NV_MSI_X_ENABLED)
4088			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4089		else
4090			disable_irq_lockdep(np->pci_dev->irq);
4091		mask = np->irqmask;
4092	} else {
4093		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4094			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4095			mask |= NVREG_IRQ_RX_ALL;
4096		}
4097		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4098			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4099			mask |= NVREG_IRQ_TX_ALL;
4100		}
4101		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4102			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4103			mask |= NVREG_IRQ_OTHER;
4104		}
4105	}
4106	/* disable_irq() contains synchronize_irq, thus no irq handler can run now */
4107
4108	if (np->recover_error) {
4109		np->recover_error = 0;
4110		netdev_info(dev, "MAC in recoverable error state\n");
4111		if (netif_running(dev)) {
4112			netif_tx_lock_bh(dev);
4113			netif_addr_lock(dev);
4114			spin_lock(&np->lock);
4115			/* stop engines */
4116			nv_stop_rxtx(dev);
4117			if (np->driver_data & DEV_HAS_POWER_CNTRL)
4118				nv_mac_reset(dev);
4119			nv_txrx_reset(dev);
4120			/* drain rx queue */
4121			nv_drain_rxtx(dev);
4122			/* reinit driver view of the rx queue */
4123			set_bufsize(dev);
4124			if (nv_init_ring(dev)) {
4125				if (!np->in_shutdown)
4126					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4127			}
4128			/* reinit nic view of the rx queue */
4129			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4130			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4131			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4132				base + NvRegRingSizes);
4133			pci_push(base);
4134			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4135			pci_push(base);
4136			/* clear interrupts */
4137			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4138				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4139			else
4140				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4141
4142			/* restart rx engine */
4143			nv_start_rxtx(dev);
4144			spin_unlock(&np->lock);
4145			netif_addr_unlock(dev);
4146			netif_tx_unlock_bh(dev);
4147		}
4148	}
4149
4150	writel(mask, base + NvRegIrqMask);
4151	pci_push(base);
4152
4153	if (!using_multi_irqs(dev)) {
4154		np->nic_poll_irq = 0;
4155		if (nv_optimized(np))
4156			nv_nic_irq_optimized(0, dev);
4157		else
4158			nv_nic_irq(0, dev);
4159		if (np->msi_flags & NV_MSI_X_ENABLED)
4160			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
4161		else
4162			enable_irq_lockdep(np->pci_dev->irq);
4163	} else {
4164		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4165			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4166			nv_nic_irq_rx(0, dev);
4167			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
4168		}
4169		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4170			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4171			nv_nic_irq_tx(0, dev);
4172			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
4173		}
4174		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4175			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4176			nv_nic_irq_other(0, dev);
4177			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4178		}
4179	}
4180
4181}
4182
4183#ifdef CONFIG_NET_POLL_CONTROLLER
4184static void nv_poll_controller(struct net_device *dev)
4185{
4186	nv_do_nic_poll((unsigned long) dev);
4187}
4188#endif
4189
4190static void nv_do_stats_poll(unsigned long data)
4191	__acquires(&netdev_priv(dev)->hwstats_lock)
4192	__releases(&netdev_priv(dev)->hwstats_lock)
4193{
4194	struct net_device *dev = (struct net_device *) data;
4195	struct fe_priv *np = netdev_priv(dev);
4196
4197	/* If lock is currently taken, the stats are being refreshed
4198	 * and hence fresh enough */
4199	if (spin_trylock(&np->hwstats_lock)) {
4200		nv_update_stats(dev);
4201		spin_unlock(&np->hwstats_lock);
4202	}
4203
4204	if (!np->in_shutdown)
4205		mod_timer(&np->stats_poll,
4206			round_jiffies(jiffies + STATS_INTERVAL));
4207}
4208
4209static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4210{
4211	struct fe_priv *np = netdev_priv(dev);
4212	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
4213	strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4214	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4215}
4216
4217static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4218{
4219	struct fe_priv *np = netdev_priv(dev);
4220	wolinfo->supported = WAKE_MAGIC;
4221
4222	spin_lock_irq(&np->lock);
4223	if (np->wolenabled)
4224		wolinfo->wolopts = WAKE_MAGIC;
4225	spin_unlock_irq(&np->lock);
4226}
4227
4228static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4229{
4230	struct fe_priv *np = netdev_priv(dev);
4231	u8 __iomem *base = get_hwbase(dev);
4232	u32 flags = 0;
4233
4234	if (wolinfo->wolopts == 0) {
4235		np->wolenabled = 0;
4236	} else if (wolinfo->wolopts & WAKE_MAGIC) {
4237		np->wolenabled = 1;
4238		flags = NVREG_WAKEUPFLAGS_ENABLE;
4239	}
4240	if (netif_running(dev)) {
4241		spin_lock_irq(&np->lock);
4242		writel(flags, base + NvRegWakeUpFlags);
4243		spin_unlock_irq(&np->lock);
4244	}
4245	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4246	return 0;
4247}
4248
4249static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4250{
4251	struct fe_priv *np = netdev_priv(dev);
4252	u32 speed;
4253	int adv;
4254
4255	spin_lock_irq(&np->lock);
4256	ecmd->port = PORT_MII;
4257	if (!netif_running(dev)) {
4258		/* We do not track link speed / duplex setting if the
4259		 * interface is disabled. Force a link check */
4260		if (nv_update_linkspeed(dev)) {
4261			if (!netif_carrier_ok(dev))
4262				netif_carrier_on(dev);
4263		} else {
4264			if (netif_carrier_ok(dev))
4265				netif_carrier_off(dev);
4266		}
4267	}
4268
4269	if (netif_carrier_ok(dev)) {
4270		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4271		case NVREG_LINKSPEED_10:
4272			speed = SPEED_10;
4273			break;
4274		case NVREG_LINKSPEED_100:
4275			speed = SPEED_100;
4276			break;
4277		case NVREG_LINKSPEED_1000:
4278			speed = SPEED_1000;
4279			break;
4280		default:
4281			speed = SPEED_UNKNOWN;
4282			break;
4283		}
4284		ecmd->duplex = DUPLEX_HALF;
4285		if (np->duplex)
4286			ecmd->duplex = DUPLEX_FULL;
4287	} else {
4288		speed = SPEED_UNKNOWN;
4289		ecmd->duplex = DUPLEX_UNKNOWN;
4290	}
4291	ethtool_cmd_speed_set(ecmd, speed);
4292	ecmd->autoneg = np->autoneg;
4293
4294	ecmd->advertising = ADVERTISED_MII;
4295	if (np->autoneg) {
4296		ecmd->advertising |= ADVERTISED_Autoneg;
4297		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4298		if (adv & ADVERTISE_10HALF)
4299			ecmd->advertising |= ADVERTISED_10baseT_Half;
4300		if (adv & ADVERTISE_10FULL)
4301			ecmd->advertising |= ADVERTISED_10baseT_Full;
4302		if (adv & ADVERTISE_100HALF)
4303			ecmd->advertising |= ADVERTISED_100baseT_Half;
4304		if (adv & ADVERTISE_100FULL)
4305			ecmd->advertising |= ADVERTISED_100baseT_Full;
4306		if (np->gigabit == PHY_GIGABIT) {
4307			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4308			if (adv & ADVERTISE_1000FULL)
4309				ecmd->advertising |= ADVERTISED_1000baseT_Full;
4310		}
4311	}
4312	ecmd->supported = (SUPPORTED_Autoneg |
4313		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4314		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4315		SUPPORTED_MII);
4316	if (np->gigabit == PHY_GIGABIT)
4317		ecmd->supported |= SUPPORTED_1000baseT_Full;
4318
4319	ecmd->phy_address = np->phyaddr;
4320	ecmd->transceiver = XCVR_EXTERNAL;
4321
4322	/* ignore maxtxpkt, maxrxpkt for now */
4323	spin_unlock_irq(&np->lock);
4324	return 0;
4325}
4326
4327static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4328{
4329	struct fe_priv *np = netdev_priv(dev);
4330	u32 speed = ethtool_cmd_speed(ecmd);
4331
4332	if (ecmd->port != PORT_MII)
4333		return -EINVAL;
4334	if (ecmd->transceiver != XCVR_EXTERNAL)
4335		return -EINVAL;
4336	if (ecmd->phy_address != np->phyaddr) {
4337		/* TODO: support switching between multiple phys. Should be
4338		 * trivial, but not enabled due to lack of test hardware. */
4339		return -EINVAL;
4340	}
4341	if (ecmd->autoneg == AUTONEG_ENABLE) {
4342		u32 mask;
4343
4344		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4345			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4346		if (np->gigabit == PHY_GIGABIT)
4347			mask |= ADVERTISED_1000baseT_Full;
4348
4349		if ((ecmd->advertising & mask) == 0)
4350			return -EINVAL;
4351
4352	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
4353		/* Note: autoneg disabled with a forced speed of 1000 is
4354		 * intentionally forbidden - no one should need that. */
4355
4356		if (speed != SPEED_10 && speed != SPEED_100)
4357			return -EINVAL;
4358		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4359			return -EINVAL;
4360	} else {
4361		return -EINVAL;
4362	}
4363
4364	netif_carrier_off(dev);
4365	if (netif_running(dev)) {
4366		unsigned long flags;
4367
4368		nv_disable_irq(dev);
4369		netif_tx_lock_bh(dev);
4370		netif_addr_lock(dev);
4371		/* with plain spinlock lockdep complains */
4372		spin_lock_irqsave(&np->lock, flags);
4373		/* stop engines */
4374		/* FIXME:
4375		 * this can take some time, and interrupts are disabled
4376		 * due to spin_lock_irqsave, but let's hope no daemon
4377		 * is going to change the settings very often...
4378		 * Worst case:
4379		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4380		 * + some minor delays, which is up to a second approximately
4381		 */
4382		nv_stop_rxtx(dev);
4383		spin_unlock_irqrestore(&np->lock, flags);
4384		netif_addr_unlock(dev);
4385		netif_tx_unlock_bh(dev);
4386	}
4387
4388	if (ecmd->autoneg == AUTONEG_ENABLE) {
4389		int adv, bmcr;
4390
4391		np->autoneg = 1;
4392
4393		/* advertise only what has been requested */
4394		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4395		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4396		if (ecmd->advertising & ADVERTISED_10baseT_Half)
4397			adv |= ADVERTISE_10HALF;
4398		if (ecmd->advertising & ADVERTISED_10baseT_Full)
4399			adv |= ADVERTISE_10FULL;
4400		if (ecmd->advertising & ADVERTISED_100baseT_Half)
4401			adv |= ADVERTISE_100HALF;
4402		if (ecmd->advertising & ADVERTISED_100baseT_Full)
4403			adv |= ADVERTISE_100FULL;
4404		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
4405			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4406		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4407			adv |=  ADVERTISE_PAUSE_ASYM;
4408		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4409
4410		if (np->gigabit == PHY_GIGABIT) {
4411			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4412			adv &= ~ADVERTISE_1000FULL;
4413			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4414				adv |= ADVERTISE_1000FULL;
4415			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4416		}
4417
4418		if (netif_running(dev))
4419			netdev_info(dev, "link down\n");
4420		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4421		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4422			bmcr |= BMCR_ANENABLE;
4423			/* reset the phy in order for settings to stick,
4424			 * and cause autoneg to start */
4425			if (phy_reset(dev, bmcr)) {
4426				netdev_info(dev, "phy reset failed\n");
4427				return -EINVAL;
4428			}
4429		} else {
4430			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4431			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4432		}
4433	} else {
4434		int adv, bmcr;
4435
4436		np->autoneg = 0;
4437
4438		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4439		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4440		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4441			adv |= ADVERTISE_10HALF;
4442		if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4443			adv |= ADVERTISE_10FULL;
4444		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4445			adv |= ADVERTISE_100HALF;
4446		if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4447			adv |= ADVERTISE_100FULL;
4448		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4449		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4450			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4451			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4452		}
4453		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4454			adv |=  ADVERTISE_PAUSE_ASYM;
4455			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4456		}
4457		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4458		np->fixed_mode = adv;
4459
4460		if (np->gigabit == PHY_GIGABIT) {
4461			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4462			adv &= ~ADVERTISE_1000FULL;
4463			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4464		}
4465
4466		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4467		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4468		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4469			bmcr |= BMCR_FULLDPLX;
4470		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4471			bmcr |= BMCR_SPEED100;
4472		if (np->phy_oui == PHY_OUI_MARVELL) {
4473			/* reset the phy in order for forced mode settings to stick */
4474			if (phy_reset(dev, bmcr)) {
4475				netdev_info(dev, "phy reset failed\n");
4476				return -EINVAL;
4477			}
4478		} else {
4479			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4480			if (netif_running(dev)) {
4481				/* Wait a bit and then reconfigure the nic. */
4482				udelay(10);
4483				nv_linkchange(dev);
4484			}
4485		}
4486	}
4487
4488	if (netif_running(dev)) {
4489		nv_start_rxtx(dev);
4490		nv_enable_irq(dev);
4491	}
4492
4493	return 0;
4494}
4495
4496#define FORCEDETH_REGS_VER	1
4497
4498static int nv_get_regs_len(struct net_device *dev)
4499{
4500	struct fe_priv *np = netdev_priv(dev);
4501	return np->register_size;
4502}
4503
4504static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4505{
4506	struct fe_priv *np = netdev_priv(dev);
4507	u8 __iomem *base = get_hwbase(dev);
4508	u32 *rbuf = buf;
4509	int i;
4510
4511	regs->version = FORCEDETH_REGS_VER;
4512	spin_lock_irq(&np->lock);
4513	for (i = 0; i < np->register_size/sizeof(u32); i++)
4514		rbuf[i] = readl(base + i*sizeof(u32));
4515	spin_unlock_irq(&np->lock);
4516}
4517
4518static int nv_nway_reset(struct net_device *dev)
4519{
4520	struct fe_priv *np = netdev_priv(dev);
4521	int ret;
4522
4523	if (np->autoneg) {
4524		int bmcr;
4525
4526		netif_carrier_off(dev);
4527		if (netif_running(dev)) {
4528			nv_disable_irq(dev);
4529			netif_tx_lock_bh(dev);
4530			netif_addr_lock(dev);
4531			spin_lock(&np->lock);
4532			/* stop engines */
4533			nv_stop_rxtx(dev);
4534			spin_unlock(&np->lock);
4535			netif_addr_unlock(dev);
4536			netif_tx_unlock_bh(dev);
4537			netdev_info(dev, "link down\n");
4538		}
4539
4540		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4541		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4542			bmcr |= BMCR_ANENABLE;
4543			/* reset the phy in order for settings to stick*/
4544			if (phy_reset(dev, bmcr)) {
4545				netdev_info(dev, "phy reset failed\n");
4546				return -EINVAL;
4547			}
4548		} else {
4549			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4550			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4551		}
4552
4553		if (netif_running(dev)) {
4554			nv_start_rxtx(dev);
4555			nv_enable_irq(dev);
4556		}
4557		ret = 0;
4558	} else {
4559		ret = -EINVAL;
4560	}
4561
4562	return ret;
4563}
4564
4565static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4566{
4567	struct fe_priv *np = netdev_priv(dev);
4568
4569	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4570	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4571
4572	ring->rx_pending = np->rx_ring_size;
4573	ring->tx_pending = np->tx_ring_size;
4574}
4575
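/* Resize the rx/tx rings. The new descriptor ring and skb maps are
 * allocated before the engines are stopped, so an allocation failure
 * leaves the current configuration untouched.
 */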
4576static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4577{
4578	struct fe_priv *np = netdev_priv(dev);
4579	u8 __iomem *base = get_hwbase(dev);
4580	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4581	dma_addr_t ring_addr;
4582
4583	if (ring->rx_pending < RX_RING_MIN ||
4584	    ring->tx_pending < TX_RING_MIN ||
4585	    ring->rx_mini_pending != 0 ||
4586	    ring->rx_jumbo_pending != 0 ||
4587	    (np->desc_ver == DESC_VER_1 &&
4588	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4589	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4590	    (np->desc_ver != DESC_VER_1 &&
4591	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4592	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4593		return -EINVAL;
4594	}
4595
4596	/* allocate new rings */
4597	if (!nv_optimized(np)) {
4598		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4599					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4600					    &ring_addr);
4601	} else {
4602		rxtx_ring = pci_alloc_consistent(np->pci_dev,
4603					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4604					    &ring_addr);
4605	}
4606	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4607	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4608	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4609		/* fall back to old rings */
4610		if (!nv_optimized(np)) {
4611			if (rxtx_ring)
4612				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4613						    rxtx_ring, ring_addr);
4614		} else {
4615			if (rxtx_ring)
4616				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4617						    rxtx_ring, ring_addr);
4618		}
4619
4620		kfree(rx_skbuff);
4621		kfree(tx_skbuff);
4622		goto exit;
4623	}
4624
4625	if (netif_running(dev)) {
4626		nv_disable_irq(dev);
4627		nv_napi_disable(dev);
4628		netif_tx_lock_bh(dev);
4629		netif_addr_lock(dev);
4630		spin_lock(&np->lock);
4631		/* stop engines */
4632		nv_stop_rxtx(dev);
4633		nv_txrx_reset(dev);
4634		/* drain queues */
4635		nv_drain_rxtx(dev);
4636		/* delete queues */
4637		free_rings(dev);
4638	}
4639
4640	/* set new values */
4641	np->rx_ring_size = ring->rx_pending;
4642	np->tx_ring_size = ring->tx_pending;
4643
4644	if (!nv_optimized(np)) {
4645		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4646		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4647	} else {
4648		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4649		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4650	}
4651	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4652	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4653	np->ring_addr = ring_addr;
4654
4655	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4656	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4657
4658	if (netif_running(dev)) {
4659		/* reinit driver view of the queues */
4660		set_bufsize(dev);
4661		if (nv_init_ring(dev)) {
4662			if (!np->in_shutdown)
4663				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4664		}
4665
4666		/* reinit nic view of the queues */
4667		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4668		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4669		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4670			base + NvRegRingSizes);
4671		pci_push(base);
4672		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4673		pci_push(base);
4674
4675		/* restart engines */
4676		nv_start_rxtx(dev);
4677		spin_unlock(&np->lock);
4678		netif_addr_unlock(dev);
4679		netif_tx_unlock_bh(dev);
4680		nv_napi_enable(dev);
4681		nv_enable_irq(dev);
4682	}
4683	return 0;
4684exit:
4685	return -ENOMEM;
4686}
4687
4688static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4689{
4690	struct fe_priv *np = netdev_priv(dev);
4691
4692	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4693	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4694	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4695}
4696
4697static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4698{
4699	struct fe_priv *np = netdev_priv(dev);
4700	int adv, bmcr;
4701
4702	if ((!np->autoneg && np->duplex == 0) ||
4703	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4704		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4705		return -EINVAL;
4706	}
4707	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4708		netdev_info(dev, "hardware does not support tx pause frames\n");
4709		return -EINVAL;
4710	}
4711
4712	netif_carrier_off(dev);
4713	if (netif_running(dev)) {
4714		nv_disable_irq(dev);
4715		netif_tx_lock_bh(dev);
4716		netif_addr_lock(dev);
4717		spin_lock(&np->lock);
4718		/* stop engines */
4719		nv_stop_rxtx(dev);
4720		spin_unlock(&np->lock);
4721		netif_addr_unlock(dev);
4722		netif_tx_unlock_bh(dev);
4723	}
4724
4725	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4726	if (pause->rx_pause)
4727		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4728	if (pause->tx_pause)
4729		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4730
4731	if (np->autoneg && pause->autoneg) {
4732		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4733
4734		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4735		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4736		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4737			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4738		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4739			adv |=  ADVERTISE_PAUSE_ASYM;
4740		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4741
4742		if (netif_running(dev))
4743			netdev_info(dev, "link down\n");
4744		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4745		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4746		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4747	} else {
4748		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4749		if (pause->rx_pause)
4750			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4751		if (pause->tx_pause)
4752			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4753
4754		if (!netif_running(dev))
4755			nv_update_linkspeed(dev);
4756		else
4757			nv_update_pause(dev, np->pause_flags);
4758	}
4759
4760	if (netif_running(dev)) {
4761		nv_start_rxtx(dev);
4762		nv_enable_irq(dev);
4763	}
4764	return 0;
4765}
4766
4767static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4768{
4769	struct fe_priv *np = netdev_priv(dev);
4770	unsigned long flags;
4771	u32 miicontrol;
4772	int err, retval = 0;
4773
4774	spin_lock_irqsave(&np->lock, flags);
4775	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4776	if (features & NETIF_F_LOOPBACK) {
4777		if (miicontrol & BMCR_LOOPBACK) {
4778			spin_unlock_irqrestore(&np->lock, flags);
4779			netdev_info(dev, "Loopback already enabled\n");
4780			return 0;
4781		}
4782		nv_disable_irq(dev);
4783		/* Turn on loopback mode */
4784		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4785		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4786		if (err) {
4787			retval = PHY_ERROR;
4788			spin_unlock_irqrestore(&np->lock, flags);
4789			phy_init(dev);
4790		} else {
4791			if (netif_running(dev)) {
4792				/* Force 1000 Mbps full-duplex */
4793				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4794									 1);
4795				/* Force link up */
4796				netif_carrier_on(dev);
4797			}
4798			spin_unlock_irqrestore(&np->lock, flags);
4799			netdev_info(dev,
4800				"Internal PHY loopback mode enabled.\n");
4801		}
4802	} else {
4803		if (!(miicontrol & BMCR_LOOPBACK)) {
4804			spin_unlock_irqrestore(&np->lock, flags);
4805			netdev_info(dev, "Loopback already disabled\n");
4806			return 0;
4807		}
4808		nv_disable_irq(dev);
4809		/* Turn off loopback */
4810		spin_unlock_irqrestore(&np->lock, flags);
4811		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4812		phy_init(dev);
4813	}
4814	msleep(500);
4815	spin_lock_irqsave(&np->lock, flags);
4816	nv_enable_irq(dev);
4817	spin_unlock_irqrestore(&np->lock, flags);
4818
4819	return retval;
4820}
4821
4822static netdev_features_t nv_fix_features(struct net_device *dev,
4823	netdev_features_t features)
4824{
4825	/* vlan is dependent on rx checksum offload */
4826	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4827		features |= NETIF_F_RXCSUM;
4828
4829	return features;
4830}
4831
4832static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4833{
4834	struct fe_priv *np = get_nvpriv(dev);
4835
4836	spin_lock_irq(&np->lock);
4837
4838	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4839		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4840	else
4841		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4842
4843	if (features & NETIF_F_HW_VLAN_CTAG_TX)
4844		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4845	else
4846		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4847
4848	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4849
4850	spin_unlock_irq(&np->lock);
4851}
4852
4853static int nv_set_features(struct net_device *dev, netdev_features_t features)
4854{
4855	struct fe_priv *np = netdev_priv(dev);
4856	u8 __iomem *base = get_hwbase(dev);
4857	netdev_features_t changed = dev->features ^ features;
4858	int retval;
4859
4860	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4861		retval = nv_set_loopback(dev, features);
4862		if (retval != 0)
4863			return retval;
4864	}
4865
4866	if (changed & NETIF_F_RXCSUM) {
4867		spin_lock_irq(&np->lock);
4868
4869		if (features & NETIF_F_RXCSUM)
4870			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4871		else
4872			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4873
4874		if (netif_running(dev))
4875			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4876
4877		spin_unlock_irq(&np->lock);
4878	}
4879
4880	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4881		nv_vlan_mode(dev, features);
4882
4883	return 0;
4884}
4885
4886static int nv_get_sset_count(struct net_device *dev, int sset)
4887{
4888	struct fe_priv *np = netdev_priv(dev);
4889
4890	switch (sset) {
4891	case ETH_SS_TEST:
4892		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4893			return NV_TEST_COUNT_EXTENDED;
4894		else
4895			return NV_TEST_COUNT_BASE;
4896	case ETH_SS_STATS:
4897		if (np->driver_data & DEV_HAS_STATISTICS_V3)
4898			return NV_DEV_STATISTICS_V3_COUNT;
4899		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4900			return NV_DEV_STATISTICS_V2_COUNT;
4901		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4902			return NV_DEV_STATISTICS_V1_COUNT;
4903		else
4904			return 0;
4905	default:
4906		return -EOPNOTSUPP;
4907	}
4908}
4909
4910static void nv_get_ethtool_stats(struct net_device *dev,
4911				 struct ethtool_stats *estats, u64 *buffer)
4912	__acquires(&netdev_priv(dev)->hwstats_lock)
4913	__releases(&netdev_priv(dev)->hwstats_lock)
4914{
4915	struct fe_priv *np = netdev_priv(dev);
4916
4917	spin_lock_bh(&np->hwstats_lock);
4918	nv_update_stats(dev);
4919	memcpy(buffer, &np->estats,
4920	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4921	spin_unlock_bh(&np->hwstats_lock);
4922}
4923
4924static int nv_link_test(struct net_device *dev)
4925{
4926	struct fe_priv *np = netdev_priv(dev);
4927	int mii_status;
4928
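	/* BMSR_LSTATUS is latched low, read twice for the current state */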
4929	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4930	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4931
4932	/* check phy link status */
4933	if (!(mii_status & BMSR_LSTATUS))
4934		return 0;
4935	else
4936		return 1;
4937}
4938
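/* Register self-test: walk nv_registers_test[] (terminated by a zero
 * register offset), toggle each entry's maskable bits, verify that the
 * toggled value reads back, and restore the original contents. Returns 1
 * on success, 0 on the first mismatch.
 */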
4939static int nv_register_test(struct net_device *dev)
4940{
4941	u8 __iomem *base = get_hwbase(dev);
4942	int i = 0;
4943	u32 orig_read, new_read;
4944
4945	do {
4946		orig_read = readl(base + nv_registers_test[i].reg);
4947
4948		/* xor with mask to toggle bits */
4949		orig_read ^= nv_registers_test[i].mask;
4950
4951		writel(orig_read, base + nv_registers_test[i].reg);
4952
4953		new_read = readl(base + nv_registers_test[i].reg);
4954
4955		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4956			return 0;
4957
4958		/* restore original value */
4959		orig_read ^= nv_registers_test[i].mask;
4960		writel(orig_read, base + nv_registers_test[i].reg);
4961
4962	} while (nv_registers_test[++i].reg != 0);
4963
4964	return 1;
4965}
4966
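/* Interrupt self-test: rebind a single test vector, arm the timer
 * interrupt and give it 100ms to fire; nv_nic_irq_test() sets
 * np->intr_test from the ISR. Returns 1 on pass, 2 if no interrupt was
 * seen, 0 if the test irq could not be set up.
 */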
4967static int nv_interrupt_test(struct net_device *dev)
4968{
4969	struct fe_priv *np = netdev_priv(dev);
4970	u8 __iomem *base = get_hwbase(dev);
4971	int ret = 1;
4972	int testcnt;
4973	u32 save_msi_flags, save_poll_interval = 0;
4974
4975	if (netif_running(dev)) {
4976		/* free current irq */
4977		nv_free_irq(dev);
4978		save_poll_interval = readl(base+NvRegPollingInterval);
4979	}
4980
4981	/* flag to test interrupt handler */
4982	np->intr_test = 0;
4983
4984	/* setup test irq */
4985	save_msi_flags = np->msi_flags;
4986	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4987	np->msi_flags |= 0x001; /* setup 1 vector */
4988	if (nv_request_irq(dev, 1))
4989		return 0;
4990
4991	/* setup timer interrupt */
4992	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4993	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4994
4995	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4996
4997	/* wait for at least one interrupt */
4998	msleep(100);
4999
5000	spin_lock_irq(&np->lock);
5001
5002	/* flag should be set within ISR */
5003	testcnt = np->intr_test;
5004	if (!testcnt)
5005		ret = 2;
5006
5007	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5008	if (!(np->msi_flags & NV_MSI_X_ENABLED))
5009		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5010	else
5011		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5012
5013	spin_unlock_irq(&np->lock);
5014
5015	nv_free_irq(dev);
5016
5017	np->msi_flags = save_msi_flags;
5018
5019	if (netif_running(dev)) {
5020		writel(save_poll_interval, base + NvRegPollingInterval);
5021		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5022		/* restore original irq */
5023		if (nv_request_irq(dev, 0))
5024			return 0;
5025	}
5026
5027	return ret;
5028}
5029
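/* Loopback self-test: put the MAC into forced loopback, send a single
 * ETH_DATA_LEN frame filled with the pattern (i & 0xff) and check that it
 * is received back unmodified. Returns 1 on pass, 0 on failure.
 */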
5030static int nv_loopback_test(struct net_device *dev)
5031{
5032	struct fe_priv *np = netdev_priv(dev);
5033	u8 __iomem *base = get_hwbase(dev);
5034	struct sk_buff *tx_skb, *rx_skb;
5035	dma_addr_t test_dma_addr;
5036	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5037	u32 flags;
5038	int len, i, pkt_len;
5039	u8 *pkt_data;
5040	u32 filter_flags = 0;
5041	u32 misc1_flags = 0;
5042	int ret = 1;
5043
5044	if (netif_running(dev)) {
5045		nv_disable_irq(dev);
5046		filter_flags = readl(base + NvRegPacketFilterFlags);
5047		misc1_flags = readl(base + NvRegMisc1);
5048	} else {
5049		nv_txrx_reset(dev);
5050	}
5051
5052	/* reinit driver view of the rx queue */
5053	set_bufsize(dev);
5054	nv_init_ring(dev);
5055
5056	/* setup hardware for loopback */
5057	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5058	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5059
5060	/* reinit nic view of the rx queue */
5061	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5062	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5063	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5064		base + NvRegRingSizes);
5065	pci_push(base);
5066
5067	/* restart rx engine */
5068	nv_start_rxtx(dev);
5069
5070	/* setup packet for tx */
5071	pkt_len = ETH_DATA_LEN;
5072	tx_skb = netdev_alloc_skb(dev, pkt_len);
5073	if (!tx_skb) {
5074		ret = 0;
5075		goto out;
5076	}
5077	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
5078				       skb_tailroom(tx_skb),
5079				       PCI_DMA_TODEVICE);
5080	if (pci_dma_mapping_error(np->pci_dev,
5081				  test_dma_addr)) {
5082		dev_kfree_skb_any(tx_skb);
		ret = 0;
5083		goto out;
5084	}
5085	pkt_data = skb_put(tx_skb, pkt_len);
5086	for (i = 0; i < pkt_len; i++)
5087		pkt_data[i] = (u8)(i & 0xff);
5088
5089	if (!nv_optimized(np)) {
5090		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5091		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5092	} else {
5093		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5094		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5095		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5096	}
5097	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5098	pci_push(get_hwbase(dev));
5099
5100	msleep(500);
5101
5102	/* check for rx of the packet */
5103	if (!nv_optimized(np)) {
5104		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5105		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5106
5107	} else {
5108		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5109		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5110	}
5111
5112	if (flags & NV_RX_AVAIL) {
5113		ret = 0;
5114	} else if (np->desc_ver == DESC_VER_1) {
5115		if (flags & NV_RX_ERROR)
5116			ret = 0;
5117	} else {
5118		if (flags & NV_RX2_ERROR)
5119			ret = 0;
5120	}
5121
5122	if (ret) {
5123		if (len != pkt_len) {
5124			ret = 0;
5125		} else {
5126			rx_skb = np->rx_skb[0].skb;
5127			for (i = 0; i < pkt_len; i++) {
5128				if (rx_skb->data[i] != (u8)(i & 0xff)) {
5129					ret = 0;
5130					break;
5131				}
5132			}
5133		}
5134	}
5135
5136	pci_unmap_single(np->pci_dev, test_dma_addr,
5137		       (skb_end_pointer(tx_skb) - tx_skb->data),
5138		       PCI_DMA_TODEVICE);
5139	dev_kfree_skb_any(tx_skb);
5140 out:
5141	/* stop engines */
5142	nv_stop_rxtx(dev);
5143	nv_txrx_reset(dev);
5144	/* drain rx queue */
5145	nv_drain_rxtx(dev);
5146
5147	if (netif_running(dev)) {
5148		writel(misc1_flags, base + NvRegMisc1);
5149		writel(filter_flags, base + NvRegPacketFilterFlags);
5150		nv_enable_irq(dev);
5151	}
5152
5153	return ret;
5154}
5155
5156static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5157{
5158	struct fe_priv *np = netdev_priv(dev);
5159	u8 __iomem *base = get_hwbase(dev);
5160	int result, count;
5161
5162	count = nv_get_sset_count(dev, ETH_SS_TEST);
5163	memset(buffer, 0, count * sizeof(u64));
5164
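	/* result layout: buffer[0] link, buffer[1] register,
	 * buffer[2] interrupt, buffer[3] loopback; 0 = pass, 1 = fail */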
5165	if (!nv_link_test(dev)) {
5166		test->flags |= ETH_TEST_FL_FAILED;
5167		buffer[0] = 1;
5168	}
5169
5170	if (test->flags & ETH_TEST_FL_OFFLINE) {
5171		if (netif_running(dev)) {
5172			netif_stop_queue(dev);
5173			nv_napi_disable(dev);
5174			netif_tx_lock_bh(dev);
5175			netif_addr_lock(dev);
5176			spin_lock_irq(&np->lock);
5177			nv_disable_hw_interrupts(dev, np->irqmask);
5178			if (!(np->msi_flags & NV_MSI_X_ENABLED))
5179				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5180			else
5181				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5182			/* stop engines */
5183			nv_stop_rxtx(dev);
5184			nv_txrx_reset(dev);
5185			/* drain rx queue */
5186			nv_drain_rxtx(dev);
5187			spin_unlock_irq(&np->lock);
5188			netif_addr_unlock(dev);
5189			netif_tx_unlock_bh(dev);
5190		}
5191
5192		if (!nv_register_test(dev)) {
5193			test->flags |= ETH_TEST_FL_FAILED;
5194			buffer[1] = 1;
5195		}
5196
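		/* nv_interrupt_test returns 1 on pass; any other value is a
		 * failure, and 0 additionally means the original irq could
		 * not be restored, so we must bail out below */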
5197		result = nv_interrupt_test(dev);
5198		if (result != 1) {
5199			test->flags |= ETH_TEST_FL_FAILED;
5200			buffer[2] = 1;
5201		}
5202		if (result == 0) {
5203			/* bail out */
5204			return;
5205		}
5206
5207		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5208			test->flags |= ETH_TEST_FL_FAILED;
5209			buffer[3] = 1;
5210		}
5211
5212		if (netif_running(dev)) {
5213			/* reinit driver view of the rx queue */
5214			set_bufsize(dev);
5215			if (nv_init_ring(dev)) {
5216				if (!np->in_shutdown)
5217					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5218			}
5219			/* reinit nic view of the rx queue */
5220			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5221			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5222			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5223				base + NvRegRingSizes);
5224			pci_push(base);
5225			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5226			pci_push(base);
5227			/* restart rx engine */
5228			nv_start_rxtx(dev);
5229			netif_start_queue(dev);
5230			nv_napi_enable(dev);
5231			nv_enable_hw_interrupts(dev, np->irqmask);
5232		}
5233	}
5234}
5235
5236static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5237{
5238	switch (stringset) {
5239	case ETH_SS_STATS:
5240		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5241		break;
5242	case ETH_SS_TEST:
5243		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5244		break;
5245	}
5246}
5247
5248static const struct ethtool_ops ops = {
5249	.get_drvinfo = nv_get_drvinfo,
5250	.get_link = ethtool_op_get_link,
5251	.get_wol = nv_get_wol,
5252	.set_wol = nv_set_wol,
5253	.get_settings = nv_get_settings,
5254	.set_settings = nv_set_settings,
5255	.get_regs_len = nv_get_regs_len,
5256	.get_regs = nv_get_regs,
5257	.nway_reset = nv_nway_reset,
5258	.get_ringparam = nv_get_ringparam,
5259	.set_ringparam = nv_set_ringparam,
5260	.get_pauseparam = nv_get_pauseparam,
5261	.set_pauseparam = nv_set_pauseparam,
5262	.get_strings = nv_get_strings,
5263	.get_ethtool_stats = nv_get_ethtool_stats,
5264	.get_sset_count = nv_get_sset_count,
5265	.self_test = nv_self_test,
5266	.get_ts_info = ethtool_op_get_ts_info,
5267};
5268
5269/* The mgmt unit and driver use a semaphore to access the phy during init */
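/* Acquisition is two-staged: wait up to 10 * 500 ms for the mgmt unit's
 * semaphore to become free, then set the host semaphore bits and read
 * back to verify that we really own it. */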
5270static int nv_mgmt_acquire_sema(struct net_device *dev)
5271{
5272	struct fe_priv *np = netdev_priv(dev);
5273	u8 __iomem *base = get_hwbase(dev);
5274	int i;
5275	u32 tx_ctrl, mgmt_sema;
5276
5277	for (i = 0; i < 10; i++) {
5278		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5279		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5280			break;
5281		msleep(500);
5282	}
5283
5284	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5285		return 0;
5286
5287	for (i = 0; i < 2; i++) {
5288		tx_ctrl = readl(base + NvRegTransmitterControl);
5289		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5290		writel(tx_ctrl, base + NvRegTransmitterControl);
5291
5292		/* verify that semaphore was acquired */
5293		tx_ctrl = readl(base + NvRegTransmitterControl);
5294		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5295		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5296			np->mgmt_sema = 1;
5297			return 1;
5298		} else
5299			udelay(50);
5300	}
5301
5302	return 0;
5303}
5304
5305static void nv_mgmt_release_sema(struct net_device *dev)
5306{
5307	struct fe_priv *np = netdev_priv(dev);
5308	u8 __iomem *base = get_hwbase(dev);
5309	u32 tx_ctrl;
5310
5311	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5312		if (np->mgmt_sema) {
5313			tx_ctrl = readl(base + NvRegTransmitterControl);
5314			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5315			writel(tx_ctrl, base + NvRegTransmitterControl);
5316		}
5317	}
5318}
5319
5320
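/* Version handshake with the mgmt unit: post the request, toggle the
 * DATA_START bit, then poll up to 5 s for the DATA_READY bit to flip;
 * DATA_ERROR set (or a timeout) means failure. */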
5321static int nv_mgmt_get_version(struct net_device *dev)
5322{
5323	struct fe_priv *np = netdev_priv(dev);
5324	u8 __iomem *base = get_hwbase(dev);
5325	u32 data_ready = readl(base + NvRegTransmitterControl);
5326	u32 data_ready2 = 0;
5327	unsigned long start;
5328	int ready = 0;
5329
5330	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5331	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5332	start = jiffies;
5333	while (time_before(jiffies, start + 5*HZ)) {
5334		data_ready2 = readl(base + NvRegTransmitterControl);
5335		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5336			ready = 1;
5337			break;
5338		}
5339		schedule_timeout_uninterruptible(1);
5340	}
5341
5342	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5343		return 0;
5344
5345	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5346
5347	return 1;
5348}
5349
5350static int nv_open(struct net_device *dev)
5351{
5352	struct fe_priv *np = netdev_priv(dev);
5353	u8 __iomem *base = get_hwbase(dev);
5354	int ret = 1;
5355	int oom, i;
5356	u32 low;
5357
5358	/* power up phy */
5359	mii_rw(dev, np->phyaddr, MII_BMCR,
5360	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5361
5362	nv_txrx_gate(dev, false);
5363	/* erase previous misconfiguration */
5364	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5365		nv_mac_reset(dev);
5366	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5367	writel(0, base + NvRegMulticastAddrB);
5368	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5369	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5370	writel(0, base + NvRegPacketFilterFlags);
5371
5372	writel(0, base + NvRegTransmitterControl);
5373	writel(0, base + NvRegReceiverControl);
5374
5375	writel(0, base + NvRegAdapterControl);
5376
5377	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5378		writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5379
5380	/* initialize descriptor rings */
5381	set_bufsize(dev);
5382	oom = nv_init_ring(dev);
5383
5384	writel(0, base + NvRegLinkSpeed);
5385	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5386	nv_txrx_reset(dev);
5387	writel(0, base + NvRegUnknownSetupReg6);
5388
5389	np->in_shutdown = 0;
5390
5391	/* give hw rings */
5392	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5393	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5394		base + NvRegRingSizes);
5395
5396	writel(np->linkspeed, base + NvRegLinkSpeed);
5397	if (np->desc_ver == DESC_VER_1)
5398		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5399	else
5400		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5401	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5402	writel(np->vlanctl_bits, base + NvRegVlanControl);
5403	pci_push(base);
5404	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5405	if (reg_delay(dev, NvRegUnknownSetupReg5,
5406		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5407		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5408		netdev_info(dev,
5409			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5410
5411	writel(0, base + NvRegMIIMask);
5412	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5413	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5414
5415	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5416	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5417	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5418	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5419
5420	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5421
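	/* seed the low bits of the slot time with random data so that the
	 * collision backoff does not run in lockstep across nics */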
5422	get_random_bytes(&low, sizeof(low));
5423	low &= NVREG_SLOTTIME_MASK;
5424	if (np->desc_ver == DESC_VER_1) {
5425		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5426	} else {
5427		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5428			/* setup legacy backoff */
5429			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5430		} else {
5431			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5432			nv_gear_backoff_reseed(dev);
5433		}
5434	}
5435	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5436	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
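	/* the polling interval is in hw timer units: per the poll_interval
	 * module parameter, value = (time_in_micro_secs * 100) / 2^10,
	 * so e.g. 97 is roughly 1 ms */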
5437	if (poll_interval == -1) {
5438		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5439			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5440		else
5441			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5442	} else
5443		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5444	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5445	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5446			base + NvRegAdapterControl);
5447	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5448	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5449	if (np->wolenabled)
5450		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5451
5452	i = readl(base + NvRegPowerState);
5453	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5454		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5455
5456	pci_push(base);
5457	udelay(10);
5458	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5459
5460	nv_disable_hw_interrupts(dev, np->irqmask);
5461	pci_push(base);
5462	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5463	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5464	pci_push(base);
5465
5466	if (nv_request_irq(dev, 0))
5467		goto out_drain;
5468
5469	/* ask for interrupts */
5470	nv_enable_hw_interrupts(dev, np->irqmask);
5471
5472	spin_lock_irq(&np->lock);
5473	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5474	writel(0, base + NvRegMulticastAddrB);
5475	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5476	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5477	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5478	/* One manual link speed update: Interrupts are enabled, future link
5479	 * speed changes cause interrupts and are handled by nv_link_irq().
5480	 */
5481	{
5482		/* read (and discard) the current mii status, then clear it all */
5483		readl(base + NvRegMIIStatus);
5484		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5485	}
5486	/* set linkspeed to an invalid value, thus forcing nv_update_linkspeed
5487	 * to init hw */
5488	np->linkspeed = 0;
5489	ret = nv_update_linkspeed(dev);
5490	nv_start_rxtx(dev);
5491	netif_start_queue(dev);
5492	nv_napi_enable(dev);
5493
5494	if (ret) {
5495		netif_carrier_on(dev);
5496	} else {
5497		netdev_info(dev, "no link during initialization\n");
5498		netif_carrier_off(dev);
5499	}
5500	if (oom)
5501		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5502
5503	/* start statistics timer */
5504	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5505		mod_timer(&np->stats_poll,
5506			round_jiffies(jiffies + STATS_INTERVAL));
5507
5508	spin_unlock_irq(&np->lock);
5509
5510	/* If the loopback feature was set while the device was down, make sure
5511	 * that it's set correctly now.
5512	 */
5513	if (dev->features & NETIF_F_LOOPBACK)
5514		nv_set_loopback(dev, dev->features);
5515
5516	return 0;
5517out_drain:
5518	nv_drain_rxtx(dev);
5519	return ret;
5520}
5521
5522static int nv_close(struct net_device *dev)
5523{
5524	struct fe_priv *np = netdev_priv(dev);
5525	u8 __iomem *base;
5526
5527	spin_lock_irq(&np->lock);
5528	np->in_shutdown = 1;
5529	spin_unlock_irq(&np->lock);
5530	nv_napi_disable(dev);
5531	synchronize_irq(np->pci_dev->irq);
5532
5533	del_timer_sync(&np->oom_kick);
5534	del_timer_sync(&np->nic_poll);
5535	del_timer_sync(&np->stats_poll);
5536
5537	netif_stop_queue(dev);
5538	spin_lock_irq(&np->lock);
5539	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5540	nv_stop_rxtx(dev);
5541	nv_txrx_reset(dev);
5542
5543	/* disable interrupts on the nic or we will lock up */
5544	base = get_hwbase(dev);
5545	nv_disable_hw_interrupts(dev, np->irqmask);
5546	pci_push(base);
5547
5548	spin_unlock_irq(&np->lock);
5549
5550	nv_free_irq(dev);
5551
5552	nv_drain_rxtx(dev);
5553
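	/* keep the receiver running (clocks ungated) if WoL may be needed
	 * or the phy must stay powered; otherwise power the phy down and
	 * gate the clocks */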
5554	if (np->wolenabled || !phy_power_down) {
5555		nv_txrx_gate(dev, false);
5556		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5557		nv_start_rx(dev);
5558	} else {
5559		/* power down phy */
5560		mii_rw(dev, np->phyaddr, MII_BMCR,
5561		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5562		nv_txrx_gate(dev, true);
5563	}
5564
5565	/* FIXME: power down nic */
5566
5567	return 0;
5568}
5569
5570static const struct net_device_ops nv_netdev_ops = {
5571	.ndo_open		= nv_open,
5572	.ndo_stop		= nv_close,
5573	.ndo_get_stats64	= nv_get_stats64,
5574	.ndo_start_xmit		= nv_start_xmit,
5575	.ndo_tx_timeout		= nv_tx_timeout,
5576	.ndo_change_mtu		= nv_change_mtu,
5577	.ndo_fix_features	= nv_fix_features,
5578	.ndo_set_features	= nv_set_features,
5579	.ndo_validate_addr	= eth_validate_addr,
5580	.ndo_set_mac_address	= nv_set_mac_address,
5581	.ndo_set_rx_mode	= nv_set_multicast,
5582#ifdef CONFIG_NET_POLL_CONTROLLER
5583	.ndo_poll_controller	= nv_poll_controller,
5584#endif
5585};
5586
5587static const struct net_device_ops nv_netdev_ops_optimized = {
5588	.ndo_open		= nv_open,
5589	.ndo_stop		= nv_close,
5590	.ndo_get_stats64	= nv_get_stats64,
5591	.ndo_start_xmit		= nv_start_xmit_optimized,
5592	.ndo_tx_timeout		= nv_tx_timeout,
5593	.ndo_change_mtu		= nv_change_mtu,
5594	.ndo_fix_features	= nv_fix_features,
5595	.ndo_set_features	= nv_set_features,
5596	.ndo_validate_addr	= eth_validate_addr,
5597	.ndo_set_mac_address	= nv_set_mac_address,
5598	.ndo_set_rx_mode	= nv_set_multicast,
5599#ifdef CONFIG_NET_POLL_CONTROLLER
5600	.ndo_poll_controller	= nv_poll_controller,
5601#endif
5602};
5603
5604static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5605{
5606	struct net_device *dev;
5607	struct fe_priv *np;
5608	unsigned long addr;
5609	u8 __iomem *base;
5610	int err, i;
5611	u32 powerstate, txreg;
5612	u32 phystate_orig = 0, phystate;
5613	int phyinitialized = 0;
5614	static int printed_version;
5615
5616	if (!printed_version++)
5617		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5618			FORCEDETH_VERSION);
5619
5620	dev = alloc_etherdev(sizeof(struct fe_priv));
5621	err = -ENOMEM;
5622	if (!dev)
5623		goto out;
5624
5625	np = netdev_priv(dev);
5626	np->dev = dev;
5627	np->pci_dev = pci_dev;
5628	spin_lock_init(&np->lock);
5629	spin_lock_init(&np->hwstats_lock);
5630	SET_NETDEV_DEV(dev, &pci_dev->dev);
5631	u64_stats_init(&np->swstats_rx_syncp);
5632	u64_stats_init(&np->swstats_tx_syncp);
5633
5634	init_timer(&np->oom_kick);
5635	np->oom_kick.data = (unsigned long) dev;
5636	np->oom_kick.function = nv_do_rx_refill;	/* timer handler */
5637	init_timer(&np->nic_poll);
5638	np->nic_poll.data = (unsigned long) dev;
5639	np->nic_poll.function = nv_do_nic_poll;	/* timer handler */
5640	init_timer_deferrable(&np->stats_poll);
5641	np->stats_poll.data = (unsigned long) dev;
5642	np->stats_poll.function = nv_do_stats_poll;	/* timer handler */
5643
5644	err = pci_enable_device(pci_dev);
5645	if (err)
5646		goto out_free;
5647
5648	pci_set_master(pci_dev);
5649
5650	err = pci_request_regions(pci_dev, DRV_NAME);
5651	if (err < 0)
5652		goto out_disable;
5653
5654	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5655		np->register_size = NV_PCI_REGSZ_VER3;
5656	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5657		np->register_size = NV_PCI_REGSZ_VER2;
5658	else
5659		np->register_size = NV_PCI_REGSZ_VER1;
5660
5661	err = -EINVAL;
5662	addr = 0;
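	/* find the first memory BAR large enough to hold the register window */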
5663	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5664		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5665				pci_resource_len(pci_dev, i) >= np->register_size) {
5666			addr = pci_resource_start(pci_dev, i);
5667			break;
5668		}
5669	}
5670	if (i == DEVICE_COUNT_RESOURCE) {
5671		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5672		goto out_relreg;
5673	}
5674
5675	/* copy of driver data */
5676	np->driver_data = id->driver_data;
5677	/* copy of device id */
5678	np->device_id = id->device;
5679
5680	/* handle different descriptor versions */
5681	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5682		/* packet format 3: supports 40-bit addressing */
5683		np->desc_ver = DESC_VER_3;
5684		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5685		if (dma_64bit) {
5686			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5687				dev_info(&pci_dev->dev,
5688					 "64-bit DMA failed, using 32-bit addressing\n");
5689			else
5690				dev->features |= NETIF_F_HIGHDMA;
5691			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5692				dev_info(&pci_dev->dev,
5693					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5694			}
5695		}
5696	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5697		/* packet format 2: supports jumbo frames */
5698		np->desc_ver = DESC_VER_2;
5699		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5700	} else {
5701		/* original packet format */
5702		np->desc_ver = DESC_VER_1;
5703		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5704	}
5705
5706	np->pkt_limit = NV_PKTLIMIT_1;
5707	if (id->driver_data & DEV_HAS_LARGEDESC)
5708		np->pkt_limit = NV_PKTLIMIT_2;
5709
5710	if (id->driver_data & DEV_HAS_CHECKSUM) {
5711		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5712		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5713			NETIF_F_TSO | NETIF_F_RXCSUM;
5714	}
5715
5716	np->vlanctl_bits = 0;
5717	if (id->driver_data & DEV_HAS_VLAN) {
5718		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5719		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5720				    NETIF_F_HW_VLAN_CTAG_TX;
5721	}
5722
5723	dev->features |= dev->hw_features;
5724
5725	/* Add loopback capability to the device. */
5726	dev->hw_features |= NETIF_F_LOOPBACK;
5727
5728	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5729	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5730	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5731	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5732		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5733	}
5734
5735	err = -ENOMEM;
5736	np->base = ioremap(addr, np->register_size);
5737	if (!np->base)
5738		goto out_relreg;
5739
5740	np->rx_ring_size = RX_RING_DEFAULT;
5741	np->tx_ring_size = TX_RING_DEFAULT;
5742
5743	if (!nv_optimized(np)) {
5744		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5745					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5746					&np->ring_addr);
5747		if (!np->rx_ring.orig)
5748			goto out_unmap;
5749		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5750	} else {
5751		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5752					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5753					&np->ring_addr);
5754		if (!np->rx_ring.ex)
5755			goto out_unmap;
5756		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5757	}
5758	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5759	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5760	if (!np->rx_skb || !np->tx_skb)
5761		goto out_freering;
5762
5763	if (!nv_optimized(np))
5764		dev->netdev_ops = &nv_netdev_ops;
5765	else
5766		dev->netdev_ops = &nv_netdev_ops_optimized;
5767
5768	netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5769	SET_ETHTOOL_OPS(dev, &ops);
5770	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5771
5772	pci_set_drvdata(pci_dev, dev);
5773
5774	/* read the mac address */
5775	base = get_hwbase(dev);
5776	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5777	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5778
5779	/* check the workaround bit for correct mac address order */
5780	txreg = readl(base + NvRegTransmitPoll);
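	/* The MAC address is held in two 32-bit registers and on older
	 * chips is stored byte-reversed.  DEV_HAS_CORRECT_MACADDR or an
	 * already-set TransmitPoll workaround bit means the stored order
	 * is correct; otherwise reverse it and set the workaround bit. */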
5781	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5782		/* mac address is already in correct order */
5783		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5784		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5785		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5786		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5787		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5788		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5789	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5790		/* mac address is already in correct order */
5791		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
5792		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
5793		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5794		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5795		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
5796		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
5797		/*
5798		 * Set orig mac address back to the reversed version.
5799		 * This flag will be cleared during low power transition.
5800		 * Therefore, we should always put back the reversed address.
5801		 */
5802		np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5803			(dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5804		np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5805	} else {
5806		/* need to reverse mac address to correct order */
5807		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
5808		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
5809		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5810		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5811		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
5812		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
5813		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5814		dev_dbg(&pci_dev->dev,
5815			"%s: set workaround bit for reversed mac addr\n",
5816			__func__);
5817	}
5818
5819	if (!is_valid_ether_addr(dev->dev_addr)) {
5820		/*
5821		 * Bad mac address. At least one bios sets the mac address
5822		 * to 01:23:45:67:89:ab
5823		 */
5824		dev_err(&pci_dev->dev,
5825			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5826			dev->dev_addr);
5827		eth_hw_addr_random(dev);
5828		dev_err(&pci_dev->dev,
5829			"Using random MAC address: %pM\n", dev->dev_addr);
5830	}
5831
5832	/* set mac address */
5833	nv_copy_mac_to_hw(dev);
5834
5835	/* disable WOL */
5836	writel(0, base + NvRegWakeUpFlags);
5837	np->wolenabled = 0;
5838	device_set_wakeup_enable(&pci_dev->dev, false);
5839
5840	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5841
5842		/* take phy and nic out of low power mode */
5843		powerstate = readl(base + NvRegPowerState2);
5844		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5845		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5846		    pci_dev->revision >= 0xA3)
5847			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5848		writel(powerstate, base + NvRegPowerState2);
5849	}
5850
5851	if (np->desc_ver == DESC_VER_1)
5852		np->tx_flags = NV_TX_VALID;
5853	else
5854		np->tx_flags = NV_TX2_VALID;
5855
5856	np->msi_flags = 0;
5857	if ((id->driver_data & DEV_HAS_MSI) && msi)
5858		np->msi_flags |= NV_MSI_CAPABLE;
5859
5860	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5861		/* MSI-X has had reported issues when the irqmask is
5862		 * modified, as happens with napi; disable it for now.
5863		 */
5864#if 0
5865		np->msi_flags |= NV_MSI_X_CAPABLE;
5866#endif
5867	}
5868
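	/* pick the irq mask for the requested optimization mode: CPU mode
	 * throttles via the timer irq, dynamic mode starts in throughput
	 * mode (and cannot use MSI-X), the default is pure throughput mode */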
5869	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5870		np->irqmask = NVREG_IRQMASK_CPU;
5871		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5872			np->msi_flags |= 0x0001;
5873	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5874		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5875		/* start off in throughput mode */
5876		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5877		/* remove support for msix mode */
5878		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5879	} else {
5880		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5881		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5882		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5883			np->msi_flags |= 0x0003;
5884	}
5885
5886	if (id->driver_data & DEV_NEED_TIMERIRQ)
5887		np->irqmask |= NVREG_IRQ_TIMER;
5888	if (id->driver_data & DEV_NEED_LINKTIMER) {
5889		np->need_linktimer = 1;
5890		np->link_timeout = jiffies + LINK_TIMEOUT;
5891	} else {
5892		np->need_linktimer = 0;
5893	}
5894
5895	/* Limit the number of outstanding tx packets to work around a hw bug */
5896	if (id->driver_data & DEV_NEED_TX_LIMIT) {
5897		np->tx_limit = 1;
5898		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5899		    pci_dev->revision >= 0xA2)
5900			np->tx_limit = 0;
5901	}
5902
5903	/* clear phy state and temporarily halt phy interrupts */
5904	writel(0, base + NvRegMIIMask);
5905	phystate = readl(base + NvRegAdapterControl);
5906	if (phystate & NVREG_ADAPTCTL_RUNNING) {
5907		phystate_orig = 1;
5908		phystate &= ~NVREG_ADAPTCTL_RUNNING;
5909		writel(phystate, base + NvRegAdapterControl);
5910	}
5911	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5912
5913	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5914		/* management unit running on the mac? */
5915		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5916		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5917		    nv_mgmt_acquire_sema(dev) &&
5918		    nv_mgmt_get_version(dev)) {
5919			np->mac_in_use = 1;
5920			if (np->mgmt_version > 0)
5921				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5922			/* did the management unit already set up the phy? */
5923			if (np->mac_in_use &&
5924			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5925			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
5926				/* phy is inited by mgmt unit */
5927				phyinitialized = 1;
5928			} else {
5929				/* we need to init the phy */
5930			}
5931		}
5932	}
5933
5934	/* find a suitable phy */
5935	for (i = 1; i <= 32; i++) {
5936		int id1, id2;
5937		int phyaddr = i & 0x1F;
5938
5939		spin_lock_irq(&np->lock);
5940		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5941		spin_unlock_irq(&np->lock);
5942		if (id1 < 0 || id1 == 0xffff)
5943			continue;
5944		spin_lock_irq(&np->lock);
5945		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5946		spin_unlock_irq(&np->lock);
5947		if (id2 < 0 || id2 == 0xffff)
5948			continue;
5949
5950		np->phy_model = id2 & PHYID2_MODEL_MASK;
5951		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5952		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5953		np->phyaddr = phyaddr;
5954		np->phy_oui = id1 | id2;
5955
5956		/* Realtek hardcoded phy id1 to all zeroes on certain phys */
5957		if (np->phy_oui == PHY_OUI_REALTEK2)
5958			np->phy_oui = PHY_OUI_REALTEK;
5959		/* Setup phy revision for Realtek */
5960		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5961			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5962
5963		break;
5964	}
5965	if (i == 33) {
5966		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5967		goto out_error;
5968	}
5969
5970	if (!phyinitialized) {
5971		/* reset it */
5972		phy_init(dev);
5973	} else {
5974		/* see if it is a gigabit phy */
5975		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5976		if (mii_status & PHY_GIGABIT)
5977			np->gigabit = PHY_GIGABIT;
5978	}
5979
5980	/* set default link speed settings */
5981	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5982	np->duplex = 0;
5983	np->autoneg = 1;
5984
5985	err = register_netdev(dev);
5986	if (err) {
5987		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5988		goto out_error;
5989	}
5990
5991	netif_carrier_off(dev);
5992
5993	/* Some NICs freeze when TX pause is enabled while NIC is
5994	 * down, and this stays across warm reboots. The sequence
5995	 * below should be enough to recover from that state.
5996	 */
5997	nv_update_pause(dev, 0);
5998	nv_start_tx(dev);
5999	nv_stop_tx(dev);
6000
6001	if (id->driver_data & DEV_HAS_VLAN)
6002		nv_vlan_mode(dev, dev->features);
6003
6004	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
6005		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6006
6007	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6008		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6009		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6010			"csum " : "",
6011		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6012				  NETIF_F_HW_VLAN_CTAG_TX) ?
6013			"vlan " : "",
6014		 dev->features & (NETIF_F_LOOPBACK) ?
6015			"loopback " : "",
6016		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6017		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6018		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6019		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6020		 np->need_linktimer ? "lnktim " : "",
6021		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6022		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6023		 np->desc_ver);
6024
6025	return 0;
6026
6027out_error:
6028	if (phystate_orig)
6029		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6030out_freering:
6031	free_rings(dev);
6032out_unmap:
6033	iounmap(get_hwbase(dev));
6034out_relreg:
6035	pci_release_regions(pci_dev);
6036out_disable:
6037	pci_disable_device(pci_dev);
6038out_free:
6039	free_netdev(dev);
6040out:
6041	return err;
6042}
6043
6044static void nv_restore_phy(struct net_device *dev)
6045{
6046	struct fe_priv *np = netdev_priv(dev);
6047	u16 phy_reserved, mii_control;
6048
6049	if (np->phy_oui == PHY_OUI_REALTEK &&
6050	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
6051	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6052		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6053		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6054		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6055		phy_reserved |= PHY_REALTEK_INIT8;
6056		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6057		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6058
6059		/* restart auto negotiation */
6060		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6061		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6062		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6063	}
6064}
6065
6066static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6067{
6068	struct net_device *dev = pci_get_drvdata(pci_dev);
6069	struct fe_priv *np = netdev_priv(dev);
6070	u8 __iomem *base = get_hwbase(dev);
6071
6072	/* special op: write back the misordered MAC address - otherwise
6073	 * the next nv_probe would see a wrong address.
6074	 */
6075	writel(np->orig_mac[0], base + NvRegMacAddrA);
6076	writel(np->orig_mac[1], base + NvRegMacAddrB);
6077	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6078	       base + NvRegTransmitPoll);
6079}
6080
6081static void nv_remove(struct pci_dev *pci_dev)
6082{
6083	struct net_device *dev = pci_get_drvdata(pci_dev);
6084
6085	unregister_netdev(dev);
6086
6087	nv_restore_mac_addr(pci_dev);
6088
6089	/* restore any phy related changes */
6090	nv_restore_phy(dev);
6091
6092	nv_mgmt_release_sema(dev);
6093
6094	/* free all structures */
6095	free_rings(dev);
6096	iounmap(get_hwbase(dev));
6097	pci_release_regions(pci_dev);
6098	pci_disable_device(pci_dev);
6099	free_netdev(dev);
6100}
6101
6102#ifdef CONFIG_PM_SLEEP
6103static int nv_suspend(struct device *device)
6104{
6105	struct pci_dev *pdev = to_pci_dev(device);
6106	struct net_device *dev = pci_get_drvdata(pdev);
6107	struct fe_priv *np = netdev_priv(dev);
6108	u8 __iomem *base = get_hwbase(dev);
6109	int i;
6110
6111	if (netif_running(dev)) {
6112		/* Gross. */
6113		nv_close(dev);
6114	}
6115	netif_device_detach(dev);
6116
6117	/* save non-pci configuration space */
6118	for (i = 0; i < np->register_size/sizeof(u32); i++)
6119		np->saved_config_space[i] = readl(base + i*sizeof(u32));
6120
6121	return 0;
6122}
6123
6124static int nv_resume(struct device *device)
6125{
6126	struct pci_dev *pdev = to_pci_dev(device);
6127	struct net_device *dev = pci_get_drvdata(pdev);
6128	struct fe_priv *np = netdev_priv(dev);
6129	u8 __iomem *base = get_hwbase(dev);
6130	int i, rc = 0;
6131
6132	/* restore non-pci configuration space */
6133	for (i = 0; i < np->register_size/sizeof(u32); i++)
6134		writel(np->saved_config_space[i], base+i*sizeof(u32));
6135
6136	if (np->driver_data & DEV_NEED_MSI_FIX)
6137		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6138
6139	/* restore phy state, including autoneg */
6140	phy_init(dev);
6141
6142	netif_device_attach(dev);
6143	if (netif_running(dev)) {
6144		rc = nv_open(dev);
6145		nv_set_multicast(dev);
6146	}
6147	return rc;
6148}
6149
6150static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6151#define NV_PM_OPS (&nv_pm_ops)
6152
6153#else
6154#define NV_PM_OPS NULL
6155#endif /* CONFIG_PM_SLEEP */
6156
6157#ifdef CONFIG_PM
6158static void nv_shutdown(struct pci_dev *pdev)
6159{
6160	struct net_device *dev = pci_get_drvdata(pdev);
6161	struct fe_priv *np = netdev_priv(dev);
6162
6163	if (netif_running(dev))
6164		nv_close(dev);
6165
6166	/*
6167	 * Restore the MAC so a kernel started by kexec won't get confused.
6168	 * If we really go for poweroff, we must not restore the MAC,
6169	 * otherwise the MAC for WOL will be reversed at least on some boards.
6170	 */
6171	if (system_state != SYSTEM_POWER_OFF)
6172		nv_restore_mac_addr(pdev);
6173
6174	pci_disable_device(pdev);
6175	/*
6176	 * Apparently it is not possible to reinitialise from D3 hot, so
6177	 * only put the device into D3 if we really go for poweroff.
6178	 */
6179	if (system_state == SYSTEM_POWER_OFF) {
6180		pci_wake_from_d3(pdev, np->wolenabled);
6181		pci_set_power_state(pdev, PCI_D3hot);
6182	}
6183}
6184#else
6185#define nv_shutdown NULL
6186#endif /* CONFIG_PM */
6187
6188static const struct pci_device_id pci_tbl[] = {
6189	{	/* nForce Ethernet Controller */
6190		PCI_DEVICE(0x10DE, 0x01C3),
6191		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6192	},
6193	{	/* nForce2 Ethernet Controller */
6194		PCI_DEVICE(0x10DE, 0x0066),
6195		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6196	},
6197	{	/* nForce3 Ethernet Controller */
6198		PCI_DEVICE(0x10DE, 0x00D6),
6199		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6200	},
6201	{	/* nForce3 Ethernet Controller */
6202		PCI_DEVICE(0x10DE, 0x0086),
6203		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6204	},
6205	{	/* nForce3 Ethernet Controller */
6206		PCI_DEVICE(0x10DE, 0x008C),
6207		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6208	},
6209	{	/* nForce3 Ethernet Controller */
6210		PCI_DEVICE(0x10DE, 0x00E6),
6211		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6212	},
6213	{	/* nForce3 Ethernet Controller */
6214		PCI_DEVICE(0x10DE, 0x00DF),
6215		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6216	},
6217	{	/* CK804 Ethernet Controller */
6218		PCI_DEVICE(0x10DE, 0x0056),
6219		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6220	},
6221	{	/* CK804 Ethernet Controller */
6222		PCI_DEVICE(0x10DE, 0x0057),
6223		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6224	},
6225	{	/* MCP04 Ethernet Controller */
6226		PCI_DEVICE(0x10DE, 0x0037),
6227		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6228	},
6229	{	/* MCP04 Ethernet Controller */
6230		PCI_DEVICE(0x10DE, 0x0038),
6231		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6232	},
6233	{	/* MCP51 Ethernet Controller */
6234		PCI_DEVICE(0x10DE, 0x0268),
6235		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6236	},
6237	{	/* MCP51 Ethernet Controller */
6238		PCI_DEVICE(0x10DE, 0x0269),
6239		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6240	},
6241	{	/* MCP55 Ethernet Controller */
6242		PCI_DEVICE(0x10DE, 0x0372),
6243		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6244	},
6245	{	/* MCP55 Ethernet Controller */
6246		PCI_DEVICE(0x10DE, 0x0373),
6247		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6248	},
6249	{	/* MCP61 Ethernet Controller */
6250		PCI_DEVICE(0x10DE, 0x03E5),
6251		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6252	},
6253	{	/* MCP61 Ethernet Controller */
6254		PCI_DEVICE(0x10DE, 0x03E6),
6255		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6256	},
6257	{	/* MCP61 Ethernet Controller */
6258		PCI_DEVICE(0x10DE, 0x03EE),
6259		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6260	},
6261	{	/* MCP61 Ethernet Controller */
6262		PCI_DEVICE(0x10DE, 0x03EF),
6263		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6264	},
6265	{	/* MCP65 Ethernet Controller */
6266		PCI_DEVICE(0x10DE, 0x0450),
6267		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6268	},
6269	{	/* MCP65 Ethernet Controller */
6270		PCI_DEVICE(0x10DE, 0x0451),
6271		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6272	},
6273	{	/* MCP65 Ethernet Controller */
6274		PCI_DEVICE(0x10DE, 0x0452),
6275		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6276	},
6277	{	/* MCP65 Ethernet Controller */
6278		PCI_DEVICE(0x10DE, 0x0453),
6279		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6280	},
6281	{	/* MCP67 Ethernet Controller */
6282		PCI_DEVICE(0x10DE, 0x054C),
6283		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6284	},
6285	{	/* MCP67 Ethernet Controller */
6286		PCI_DEVICE(0x10DE, 0x054D),
6287		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6288	},
6289	{	/* MCP67 Ethernet Controller */
6290		PCI_DEVICE(0x10DE, 0x054E),
6291		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6292	},
6293	{	/* MCP67 Ethernet Controller */
6294		PCI_DEVICE(0x10DE, 0x054F),
6295		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6296	},
6297	{	/* MCP73 Ethernet Controller */
6298		PCI_DEVICE(0x10DE, 0x07DC),
6299		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6300	},
6301	{	/* MCP73 Ethernet Controller */
6302		PCI_DEVICE(0x10DE, 0x07DD),
6303		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6304	},
6305	{	/* MCP73 Ethernet Controller */
6306		PCI_DEVICE(0x10DE, 0x07DE),
6307		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6308	},
6309	{	/* MCP73 Ethernet Controller */
6310		PCI_DEVICE(0x10DE, 0x07DF),
6311		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6312	},
6313	{	/* MCP77 Ethernet Controller */
6314		PCI_DEVICE(0x10DE, 0x0760),
6315		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6316	},
6317	{	/* MCP77 Ethernet Controller */
6318		PCI_DEVICE(0x10DE, 0x0761),
6319		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6320	},
6321	{	/* MCP77 Ethernet Controller */
6322		PCI_DEVICE(0x10DE, 0x0762),
6323		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6324	},
6325	{	/* MCP77 Ethernet Controller */
6326		PCI_DEVICE(0x10DE, 0x0763),
6327		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6328	},
6329	{	/* MCP79 Ethernet Controller */
6330		PCI_DEVICE(0x10DE, 0x0AB0),
6331		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6332	},
6333	{	/* MCP79 Ethernet Controller */
6334		PCI_DEVICE(0x10DE, 0x0AB1),
6335		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6336	},
6337	{	/* MCP79 Ethernet Controller */
6338		PCI_DEVICE(0x10DE, 0x0AB2),
6339		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6340	},
6341	{	/* MCP79 Ethernet Controller */
6342		PCI_DEVICE(0x10DE, 0x0AB3),
6343		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6344	},
6345	{	/* MCP89 Ethernet Controller */
6346		PCI_DEVICE(0x10DE, 0x0D7D),
6347		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6348	},
6349	{0,},
6350};
6351
6352static struct pci_driver forcedeth_pci_driver = {
6353	.name		= DRV_NAME,
6354	.id_table	= pci_tbl,
6355	.probe		= nv_probe,
6356	.remove		= nv_remove,
6357	.shutdown	= nv_shutdown,
6358	.driver.pm	= NV_PM_OPS,
6359};
6360
6361module_param(max_interrupt_work, int, 0);
6362MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6363module_param(optimization_mode, int, 0);
6364MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6365module_param(poll_interval, int, 0);
6366MODULE_PARM_DESC(poll_interval, "Interval that determines how frequently the timer interrupt fires, computed as (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
6367module_param(msi, int, 0);
6368MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6369module_param(msix, int, 0);
6370MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6371module_param(dma_64bit, int, 0);
6372MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6373module_param(phy_cross, int, 0);
6374MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6375module_param(phy_power_down, int, 0);
6376MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6377module_param(debug_tx_timeout, bool, 0);
6378MODULE_PARM_DESC(debug_tx_timeout,
6379		 "Dump tx related registers and ring when tx_timeout happens");
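/* Example (hypothetical values): loading the module with
 *   modprobe forcedeth optimization_mode=2 poll_interval=97
 * would select dynamic interrupt moderation with a ~1 ms timer interval. */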
6380
6381module_pci_driver(forcedeth_pci_driver);
6382MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6383MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6384MODULE_LICENSE("GPL");
6385MODULE_DEVICE_TABLE(pci, pci_tbl);
 243	NvRegTxPauseFrameLimit = 0x174,
 244#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE	0x00010000
 245	NvRegMIIStatus = 0x180,
 246#define NVREG_MIISTAT_ERROR		0x0001
 247#define NVREG_MIISTAT_LINKCHANGE	0x0008
 248#define NVREG_MIISTAT_MASK_RW		0x0007
 249#define NVREG_MIISTAT_MASK_ALL		0x000f
 250	NvRegMIIMask = 0x184,
 251#define NVREG_MII_LINKCHANGE		0x0008
 252
 253	NvRegAdapterControl = 0x188,
 254#define NVREG_ADAPTCTL_START	0x02
 255#define NVREG_ADAPTCTL_LINKUP	0x04
 256#define NVREG_ADAPTCTL_PHYVALID	0x40000
 257#define NVREG_ADAPTCTL_RUNNING	0x100000
 258#define NVREG_ADAPTCTL_PHYSHIFT	24
 259	NvRegMIISpeed = 0x18c,
 260#define NVREG_MIISPEED_BIT8	(1<<8)
 261#define NVREG_MIIDELAY	5
 262	NvRegMIIControl = 0x190,
 263#define NVREG_MIICTL_INUSE	0x08000
 264#define NVREG_MIICTL_WRITE	0x00400
 265#define NVREG_MIICTL_ADDRSHIFT	5
 266	NvRegMIIData = 0x194,
 267	NvRegTxUnicast = 0x1a0,
 268	NvRegTxMulticast = 0x1a4,
 269	NvRegTxBroadcast = 0x1a8,
 270	NvRegWakeUpFlags = 0x200,
 271#define NVREG_WAKEUPFLAGS_VAL		0x7770
 272#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
 273#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
 274#define NVREG_WAKEUPFLAGS_D3SHIFT	12
 275#define NVREG_WAKEUPFLAGS_D2SHIFT	8
 276#define NVREG_WAKEUPFLAGS_D1SHIFT	4
 277#define NVREG_WAKEUPFLAGS_D0SHIFT	0
 278#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
 279#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
 280#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
 281#define NVREG_WAKEUPFLAGS_ENABLE	0x1111
 282
 283	NvRegMgmtUnitGetVersion = 0x204,
 284#define NVREG_MGMTUNITGETVERSION	0x01
 285	NvRegMgmtUnitVersion = 0x208,
 286#define NVREG_MGMTUNITVERSION		0x08
 287	NvRegPowerCap = 0x268,
 288#define NVREG_POWERCAP_D3SUPP	(1<<30)
 289#define NVREG_POWERCAP_D2SUPP	(1<<26)
 290#define NVREG_POWERCAP_D1SUPP	(1<<25)
 291	NvRegPowerState = 0x26c,
 292#define NVREG_POWERSTATE_POWEREDUP	0x8000
 293#define NVREG_POWERSTATE_VALID		0x0100
 294#define NVREG_POWERSTATE_MASK		0x0003
 295#define NVREG_POWERSTATE_D0		0x0000
 296#define NVREG_POWERSTATE_D1		0x0001
 297#define NVREG_POWERSTATE_D2		0x0002
 298#define NVREG_POWERSTATE_D3		0x0003
 299	NvRegMgmtUnitControl = 0x278,
 300#define NVREG_MGMTUNITCONTROL_INUSE	0x20000
 301	NvRegTxCnt = 0x280,
 302	NvRegTxZeroReXmt = 0x284,
 303	NvRegTxOneReXmt = 0x288,
 304	NvRegTxManyReXmt = 0x28c,
 305	NvRegTxLateCol = 0x290,
 306	NvRegTxUnderflow = 0x294,
 307	NvRegTxLossCarrier = 0x298,
 308	NvRegTxExcessDef = 0x29c,
 309	NvRegTxRetryErr = 0x2a0,
 310	NvRegRxFrameErr = 0x2a4,
 311	NvRegRxExtraByte = 0x2a8,
 312	NvRegRxLateCol = 0x2ac,
 313	NvRegRxRunt = 0x2b0,
 314	NvRegRxFrameTooLong = 0x2b4,
 315	NvRegRxOverflow = 0x2b8,
 316	NvRegRxFCSErr = 0x2bc,
 317	NvRegRxFrameAlignErr = 0x2c0,
 318	NvRegRxLenErr = 0x2c4,
 319	NvRegRxUnicast = 0x2c8,
 320	NvRegRxMulticast = 0x2cc,
 321	NvRegRxBroadcast = 0x2d0,
 322	NvRegTxDef = 0x2d4,
 323	NvRegTxFrame = 0x2d8,
 324	NvRegRxCnt = 0x2dc,
 325	NvRegTxPause = 0x2e0,
 326	NvRegRxPause = 0x2e4,
 327	NvRegRxDropFrame = 0x2e8,
 328	NvRegVlanControl = 0x300,
 329#define NVREG_VLANCONTROL_ENABLE	0x2000
 330	NvRegMSIXMap0 = 0x3e0,
 331	NvRegMSIXMap1 = 0x3e4,
 332	NvRegMSIXIrqStatus = 0x3f0,
 333
 334	NvRegPowerState2 = 0x600,
 335#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F15
 336#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 337#define NVREG_POWERSTATE2_PHY_RESET		0x0004
 338#define NVREG_POWERSTATE2_GATE_CLOCKS		0x0F00
 339};
 340
 341/* Big endian: should work, but is untested */
 342struct ring_desc {
 343	__le32 buf;
 344	__le32 flaglen;
 345};
 346
 347struct ring_desc_ex {
 348	__le32 bufhigh;
 349	__le32 buflow;
 350	__le32 txvlan;
 351	__le32 flaglen;
 352};
 353
 354union ring_type {
 355	struct ring_desc *orig;
 356	struct ring_desc_ex *ex;
 357};
 358
 359#define FLAG_MASK_V1 0xffff0000
 360#define FLAG_MASK_V2 0xffffc000
 361#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
 362#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
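/*
 * Layout note (hedged, inferred from the masks above): flaglen packs the
 * buffer length into the low bits and the NV_TX_xxx / NV_RX_xxx status
 * flags into the high bits; LEN_MASK_V1 works out to 0x0000ffff (16
 * length bits) and LEN_MASK_V2 to 0x00003fff (14 length bits).
 */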
 363
 364#define NV_TX_LASTPACKET	(1<<16)
 365#define NV_TX_RETRYERROR	(1<<19)
 366#define NV_TX_RETRYCOUNT_MASK	(0xF<<20)
 367#define NV_TX_FORCED_INTERRUPT	(1<<24)
 368#define NV_TX_DEFERRED		(1<<26)
 369#define NV_TX_CARRIERLOST	(1<<27)
 370#define NV_TX_LATECOLLISION	(1<<28)
 371#define NV_TX_UNDERFLOW		(1<<29)
 372#define NV_TX_ERROR		(1<<30)
 373#define NV_TX_VALID		(1<<31)
 374
 375#define NV_TX2_LASTPACKET	(1<<29)
 376#define NV_TX2_RETRYERROR	(1<<18)
 377#define NV_TX2_RETRYCOUNT_MASK	(0xF<<19)
 378#define NV_TX2_FORCED_INTERRUPT	(1<<30)
 379#define NV_TX2_DEFERRED		(1<<25)
 380#define NV_TX2_CARRIERLOST	(1<<26)
 381#define NV_TX2_LATECOLLISION	(1<<27)
 382#define NV_TX2_UNDERFLOW	(1<<28)
 383/* error and valid are the same for both */
 384#define NV_TX2_ERROR		(1<<30)
 385#define NV_TX2_VALID		(1<<31)
 386#define NV_TX2_TSO		(1<<28)
 387#define NV_TX2_TSO_SHIFT	14
 388#define NV_TX2_TSO_MAX_SHIFT	14
 389#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
 390#define NV_TX2_CHECKSUM_L3	(1<<27)
 391#define NV_TX2_CHECKSUM_L4	(1<<26)
 392
 393#define NV_TX3_VLAN_TAG_PRESENT (1<<18)
 394
 395#define NV_RX_DESCRIPTORVALID	(1<<16)
 396#define NV_RX_MISSEDFRAME	(1<<17)
 397#define NV_RX_SUBTRACT1		(1<<18)
 398#define NV_RX_ERROR1		(1<<23)
 399#define NV_RX_ERROR2		(1<<24)
 400#define NV_RX_ERROR3		(1<<25)
 401#define NV_RX_ERROR4		(1<<26)
 402#define NV_RX_CRCERR		(1<<27)
 403#define NV_RX_OVERFLOW		(1<<28)
 404#define NV_RX_FRAMINGERR	(1<<29)
 405#define NV_RX_ERROR		(1<<30)
 406#define NV_RX_AVAIL		(1<<31)
 407#define NV_RX_ERROR_MASK	(NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)
 408
 409#define NV_RX2_CHECKSUMMASK	(0x1C000000)
 410#define NV_RX2_CHECKSUM_IP	(0x10000000)
 411#define NV_RX2_CHECKSUM_IP_TCP	(0x14000000)
 412#define NV_RX2_CHECKSUM_IP_UDP	(0x18000000)
 413#define NV_RX2_DESCRIPTORVALID	(1<<29)
 414#define NV_RX2_SUBTRACT1	(1<<25)
 415#define NV_RX2_ERROR1		(1<<18)
 416#define NV_RX2_ERROR2		(1<<19)
 417#define NV_RX2_ERROR3		(1<<20)
 418#define NV_RX2_ERROR4		(1<<21)
 419#define NV_RX2_CRCERR		(1<<22)
 420#define NV_RX2_OVERFLOW		(1<<23)
 421#define NV_RX2_FRAMINGERR	(1<<24)
 422/* error and avail are the same for both */
 423#define NV_RX2_ERROR		(1<<30)
 424#define NV_RX2_AVAIL		(1<<31)
 425#define NV_RX2_ERROR_MASK	(NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)
 426
 427#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
 428#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 429
 430/* Miscellaneous hardware related defines: */
 431#define NV_PCI_REGSZ_VER1	0x270
 432#define NV_PCI_REGSZ_VER2	0x2d4
 433#define NV_PCI_REGSZ_VER3	0x604
 434#define NV_PCI_REGSZ_MAX	0x604
 435
 436/* various timeout delays: all in usec */
 437#define NV_TXRX_RESET_DELAY	4
 438#define NV_TXSTOP_DELAY1	10
 439#define NV_TXSTOP_DELAY1MAX	500000
 440#define NV_TXSTOP_DELAY2	100
 441#define NV_RXSTOP_DELAY1	10
 442#define NV_RXSTOP_DELAY1MAX	500000
 443#define NV_RXSTOP_DELAY2	100
 444#define NV_SETUP5_DELAY		5
 445#define NV_SETUP5_DELAYMAX	50000
 446#define NV_POWERUP_DELAY	5
 447#define NV_POWERUP_DELAYMAX	5000
 448#define NV_MIIBUSY_DELAY	50
 449#define NV_MIIPHY_DELAY	10
 450#define NV_MIIPHY_DELAYMAX	10000
 451#define NV_MAC_RESET_DELAY	64
 452
 453#define NV_WAKEUPPATTERNS	5
 454#define NV_WAKEUPMASKENTRIES	4
 455
 456/* General driver defaults */
 457#define NV_WATCHDOG_TIMEO	(5*HZ)
 458
 459#define RX_RING_DEFAULT		512
 460#define TX_RING_DEFAULT		256
 461#define RX_RING_MIN		128
 462#define TX_RING_MIN		64
 463#define RING_MAX_DESC_VER_1	1024
 464#define RING_MAX_DESC_VER_2_3	16384
 465
 466/* rx/tx mac addr + type + vlan + align + slack*/
 467#define NV_RX_HEADERS		(64)
 468/* even more slack. */
 469#define NV_RX_ALLOC_PAD		(64)
 470
 471/* maximum mtu size */
 472#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
 473#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
 474
 475#define OOM_REFILL	(1+HZ/20)
 476#define POLL_WAIT	(1+HZ/100)
 477#define LINK_TIMEOUT	(3*HZ)
 478#define STATS_INTERVAL	(10*HZ)
 479
 480/*
 481 * desc_ver values:
 482 * The nic supports three different descriptor types:
 483 * - DESC_VER_1: Original
 484 * - DESC_VER_2: support for jumbo frames.
 485 * - DESC_VER_3: 64-bit format.
 486 */
 487#define DESC_VER_1	1
 488#define DESC_VER_2	2
 489#define DESC_VER_3	3
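/*
 * Note (hedged): as nv_optimized() below encodes, DESC_VER_1 and
 * DESC_VER_2 use the two-word struct ring_desc defined above, while
 * DESC_VER_3 uses struct ring_desc_ex with split 64-bit buffer
 * addresses.
 */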
 490
 491/* PHY defines */
 492#define PHY_OUI_MARVELL		0x5043
 493#define PHY_OUI_CICADA		0x03f1
 494#define PHY_OUI_VITESSE		0x01c1
 495#define PHY_OUI_REALTEK		0x0732
 496#define PHY_OUI_REALTEK2	0x0020
 497#define PHYID1_OUI_MASK	0x03ff
 498#define PHYID1_OUI_SHFT	6
 499#define PHYID2_OUI_MASK	0xfc00
 500#define PHYID2_OUI_SHFT	10
 501#define PHYID2_MODEL_MASK		0x03f0
 502#define PHY_MODEL_REALTEK_8211		0x0110
 503#define PHY_REV_MASK			0x0001
 504#define PHY_REV_REALTEK_8211B		0x0000
 505#define PHY_REV_REALTEK_8211C		0x0001
 506#define PHY_MODEL_REALTEK_8201		0x0200
 507#define PHY_MODEL_MARVELL_E3016		0x0220
 508#define PHY_MARVELL_E3016_INITMASK	0x0300
 509#define PHY_CICADA_INIT1	0x0f000
 510#define PHY_CICADA_INIT2	0x0e00
 511#define PHY_CICADA_INIT3	0x01000
 512#define PHY_CICADA_INIT4	0x0200
 513#define PHY_CICADA_INIT5	0x0004
 514#define PHY_CICADA_INIT6	0x02000
 515#define PHY_VITESSE_INIT_REG1	0x1f
 516#define PHY_VITESSE_INIT_REG2	0x10
 517#define PHY_VITESSE_INIT_REG3	0x11
 518#define PHY_VITESSE_INIT_REG4	0x12
 519#define PHY_VITESSE_INIT_MSK1	0xc
 520#define PHY_VITESSE_INIT_MSK2	0x0180
 521#define PHY_VITESSE_INIT1	0x52b5
 522#define PHY_VITESSE_INIT2	0xaf8a
 523#define PHY_VITESSE_INIT3	0x8
 524#define PHY_VITESSE_INIT4	0x8f8a
 525#define PHY_VITESSE_INIT5	0xaf86
 526#define PHY_VITESSE_INIT6	0x8f86
 527#define PHY_VITESSE_INIT7	0xaf82
 528#define PHY_VITESSE_INIT8	0x0100
 529#define PHY_VITESSE_INIT9	0x8f82
 530#define PHY_VITESSE_INIT10	0x0
 531#define PHY_REALTEK_INIT_REG1	0x1f
 532#define PHY_REALTEK_INIT_REG2	0x19
 533#define PHY_REALTEK_INIT_REG3	0x13
 534#define PHY_REALTEK_INIT_REG4	0x14
 535#define PHY_REALTEK_INIT_REG5	0x18
 536#define PHY_REALTEK_INIT_REG6	0x11
 537#define PHY_REALTEK_INIT_REG7	0x01
 538#define PHY_REALTEK_INIT1	0x0000
 539#define PHY_REALTEK_INIT2	0x8e00
 540#define PHY_REALTEK_INIT3	0x0001
 541#define PHY_REALTEK_INIT4	0xad17
 542#define PHY_REALTEK_INIT5	0xfb54
 543#define PHY_REALTEK_INIT6	0xf5c7
 544#define PHY_REALTEK_INIT7	0x1000
 545#define PHY_REALTEK_INIT8	0x0003
 546#define PHY_REALTEK_INIT9	0x0008
 547#define PHY_REALTEK_INIT10	0x0005
 548#define PHY_REALTEK_INIT11	0x0200
 549#define PHY_REALTEK_INIT_MSK1	0x0003
 550
 551#define PHY_GIGABIT	0x0100
 552
 553#define PHY_TIMEOUT	0x1
 554#define PHY_ERROR	0x2
 555
 556#define PHY_100	0x1
 557#define PHY_1000	0x2
 558#define PHY_HALF	0x100
 559
 560#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
 561#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
 562#define NV_PAUSEFRAME_RX_ENABLE  0x0004
 563#define NV_PAUSEFRAME_TX_ENABLE  0x0008
 564#define NV_PAUSEFRAME_RX_REQ     0x0010
 565#define NV_PAUSEFRAME_TX_REQ     0x0020
 566#define NV_PAUSEFRAME_AUTONEG    0x0040
 567
 568/* MSI/MSI-X defines */
 569#define NV_MSI_X_MAX_VECTORS  8
 570#define NV_MSI_X_VECTORS_MASK 0x000f
 571#define NV_MSI_CAPABLE        0x0010
 572#define NV_MSI_X_CAPABLE      0x0020
 573#define NV_MSI_ENABLED        0x0040
 574#define NV_MSI_X_ENABLED      0x0080
 575
 576#define NV_MSI_X_VECTOR_ALL   0x0
 577#define NV_MSI_X_VECTOR_RX    0x0
 578#define NV_MSI_X_VECTOR_TX    0x1
 579#define NV_MSI_X_VECTOR_OTHER 0x2
 580
 581#define NV_MSI_PRIV_OFFSET 0x68
 582#define NV_MSI_PRIV_VALUE  0xffffffff
 583
 584#define NV_RESTART_TX         0x1
 585#define NV_RESTART_RX         0x2
 586
 587#define NV_TX_LIMIT_COUNT     16
 588
 589#define NV_DYNAMIC_THRESHOLD        4
 590#define NV_DYNAMIC_MAX_QUIET_COUNT  2048
 591
 592/* statistics */
 593struct nv_ethtool_str {
 594	char name[ETH_GSTRING_LEN];
 595};
 596
 597static const struct nv_ethtool_str nv_estats_str[] = {
 598	{ "tx_bytes" }, /* includes Ethernet FCS CRC */
 599	{ "tx_zero_rexmt" },
 600	{ "tx_one_rexmt" },
 601	{ "tx_many_rexmt" },
 602	{ "tx_late_collision" },
 603	{ "tx_fifo_errors" },
 604	{ "tx_carrier_errors" },
 605	{ "tx_excess_deferral" },
 606	{ "tx_retry_error" },
 607	{ "rx_frame_error" },
 608	{ "rx_extra_byte" },
 609	{ "rx_late_collision" },
 610	{ "rx_runt" },
 611	{ "rx_frame_too_long" },
 612	{ "rx_over_errors" },
 613	{ "rx_crc_errors" },
 614	{ "rx_frame_align_error" },
 615	{ "rx_length_error" },
 616	{ "rx_unicast" },
 617	{ "rx_multicast" },
 618	{ "rx_broadcast" },
 619	{ "rx_packets" },
 620	{ "rx_errors_total" },
 621	{ "tx_errors_total" },
 622
 623	/* version 2 stats */
 624	{ "tx_deferral" },
 625	{ "tx_packets" },
 626	{ "rx_bytes" }, /* includes Ethernet FCS CRC */
 627	{ "tx_pause" },
 628	{ "rx_pause" },
 629	{ "rx_drop_frame" },
 630
 631	/* version 3 stats */
 632	{ "tx_unicast" },
 633	{ "tx_multicast" },
 634	{ "tx_broadcast" }
 635};
 636
 637struct nv_ethtool_stats {
 638	u64 tx_bytes; /* should be ifconfig->tx_bytes + 4*tx_packets */
 639	u64 tx_zero_rexmt;
 640	u64 tx_one_rexmt;
 641	u64 tx_many_rexmt;
 642	u64 tx_late_collision;
 643	u64 tx_fifo_errors;
 644	u64 tx_carrier_errors;
 645	u64 tx_excess_deferral;
 646	u64 tx_retry_error;
 647	u64 rx_frame_error;
 648	u64 rx_extra_byte;
 649	u64 rx_late_collision;
 650	u64 rx_runt;
 651	u64 rx_frame_too_long;
 652	u64 rx_over_errors;
 653	u64 rx_crc_errors;
 654	u64 rx_frame_align_error;
 655	u64 rx_length_error;
 656	u64 rx_unicast;
 657	u64 rx_multicast;
 658	u64 rx_broadcast;
 659	u64 rx_packets; /* should be ifconfig->rx_packets */
 660	u64 rx_errors_total;
 661	u64 tx_errors_total;
 662
 663	/* version 2 stats */
 664	u64 tx_deferral;
 665	u64 tx_packets; /* should be ifconfig->tx_packets */
 666	u64 rx_bytes;   /* should be ifconfig->rx_bytes + 4*rx_packets */
 667	u64 tx_pause;
 668	u64 rx_pause;
 669	u64 rx_drop_frame;
 670
 671	/* version 3 stats */
 672	u64 tx_unicast;
 673	u64 tx_multicast;
 674	u64 tx_broadcast;
 675};
 676
 677#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
 678#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
 679#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
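/*
 * Worked arithmetic: every member of struct nv_ethtool_stats is a u64,
 * so with no implicit padding V3 = 33 counters, V2 = 33 - 3 = 30 and
 * V1 = 30 - 6 = 24, matching the version groups in nv_estats_str above.
 */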
 680
 681/* diagnostics */
 682#define NV_TEST_COUNT_BASE 3
 683#define NV_TEST_COUNT_EXTENDED 4
 684
 685static const struct nv_ethtool_str nv_etests_str[] = {
 686	{ "link      (online/offline)" },
 687	{ "register  (offline)       " },
 688	{ "interrupt (offline)       " },
 689	{ "loopback  (offline)       " }
 690};
 691
 692struct register_test {
 693	__u32 reg;
 694	__u32 mask;
 695};
 696
 697static const struct register_test nv_registers_test[] = {
 698	{ NvRegUnknownSetupReg6, 0x01 },
 699	{ NvRegMisc1, 0x03c },
 700	{ NvRegOffloadConfig, 0x03ff },
 701	{ NvRegMulticastAddrA, 0xffffffff },
 702	{ NvRegTxWatermark, 0x0ff },
 703	{ NvRegWakeUpFlags, 0x07777 },
 704	{ 0, 0 }
 705};
 706
 707struct nv_skb_map {
 708	struct sk_buff *skb;
 709	dma_addr_t dma;
 710	unsigned int dma_len:31;
 711	unsigned int dma_single:1;
 712	struct ring_desc_ex *first_tx_desc;
 713	struct nv_skb_map *next_tx_ctx;
 714};
 715
 716struct nv_txrx_stats {
 717	u64 stat_rx_packets;
 718	u64 stat_rx_bytes; /* not always available in HW */
 719	u64 stat_rx_missed_errors;
 720	u64 stat_rx_dropped;
 721	u64 stat_tx_packets; /* not always available in HW */
 722	u64 stat_tx_bytes;
 723	u64 stat_tx_dropped;
 724};
 725
 726#define nv_txrx_stats_inc(member) \
 727		__this_cpu_inc(np->txrx_stats->member)
 728#define nv_txrx_stats_add(member, count) \
 729		__this_cpu_add(np->txrx_stats->member, (count))
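/*
 * Usage sketch (the same pattern appears verbatim in the rx/tx paths
 * below): per-cpu counters are bumped inside a u64_stats section:
 *
 *	u64_stats_update_begin(&np->swstats_rx_syncp);
 *	nv_txrx_stats_inc(stat_rx_dropped);
 *	u64_stats_update_end(&np->swstats_rx_syncp);
 */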
 730
 731/*
 732 * SMP locking:
 733 * All hardware access under netdev_priv(dev)->lock, except the performance
 734 * critical parts:
 735 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 736 *	by the arch code for interrupts.
 737 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 738 *	needs netdev_priv(dev)->lock :-(
 739 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 740 *
 741 * Hardware stats updates are protected by hwstats_lock:
 742 * - updated by nv_do_stats_poll (timer). This is meant to avoid
 743 *   integer wraparound in the NIC stats registers, at low frequency
 744 *   (0.1 Hz)
 745 * - updated by nv_get_ethtool_stats + nv_get_stats64
 746 *
 747 * Software stats are accessed only through 64b synchronization points
 748 * and are not subject to other synchronization techniques (single
 749 * update thread on the TX or RX paths).
 750 */
 751
 752/* in dev: base, irq */
 753struct fe_priv {
 754	spinlock_t lock;
 755
 756	struct net_device *dev;
 757	struct napi_struct napi;
 758
 759	/* hardware stats are updated in syscall and timer */
 760	spinlock_t hwstats_lock;
 761	struct nv_ethtool_stats estats;
 762
 763	int in_shutdown;
 764	u32 linkspeed;
 765	int duplex;
 766	int autoneg;
 767	int fixed_mode;
 768	int phyaddr;
 769	int wolenabled;
 770	unsigned int phy_oui;
 771	unsigned int phy_model;
 772	unsigned int phy_rev;
 773	u16 gigabit;
 774	int intr_test;
 775	int recover_error;
 776	int quiet_count;
 777
 778	/* General data: RO fields */
 779	dma_addr_t ring_addr;
 780	struct pci_dev *pci_dev;
 781	u32 orig_mac[2];
 782	u32 events;
 783	u32 irqmask;
 784	u32 desc_ver;
 785	u32 txrxctl_bits;
 786	u32 vlanctl_bits;
 787	u32 driver_data;
 788	u32 device_id;
 789	u32 register_size;
 790	u32 mac_in_use;
 791	int mgmt_version;
 792	int mgmt_sema;
 793
 794	void __iomem *base;
 795
 796	/* rx specific fields.
 797	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 798	 */
 799	union ring_type get_rx, put_rx, last_rx;
 800	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
 801	struct nv_skb_map *last_rx_ctx;
 802	struct nv_skb_map *rx_skb;
 803
 804	union ring_type rx_ring;
 805	unsigned int rx_buf_sz;
 806	unsigned int pkt_limit;
 807	struct timer_list oom_kick;
 808	struct timer_list nic_poll;
 809	struct timer_list stats_poll;
 810	u32 nic_poll_irq;
 811	int rx_ring_size;
 812
 813	/* RX software stats */
 814	struct u64_stats_sync swstats_rx_syncp;
 815	struct nv_txrx_stats __percpu *txrx_stats;
 816
 817	/* media detection workaround.
 818	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
 819	 */
 820	int need_linktimer;
 821	unsigned long link_timeout;
 822	/*
 823	 * tx specific fields.
 824	 */
 825	union ring_type get_tx, put_tx, last_tx;
 826	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
 827	struct nv_skb_map *last_tx_ctx;
 828	struct nv_skb_map *tx_skb;
 829
 830	union ring_type tx_ring;
 831	u32 tx_flags;
 832	int tx_ring_size;
 833	int tx_limit;
 834	u32 tx_pkts_in_progress;
 835	struct nv_skb_map *tx_change_owner;
 836	struct nv_skb_map *tx_end_flip;
 837	int tx_stop;
 838
 839	/* TX software stats */
 840	struct u64_stats_sync swstats_tx_syncp;
 841
 842	/* msi/msi-x fields */
 843	u32 msi_flags;
 844	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 845
 846	/* flow control */
 847	u32 pause_flags;
 848
 849	/* power saved state */
 850	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];
 851
 852	/* for different msi-x irq type */
 853	char name_rx[IFNAMSIZ + 3];       /* -rx    */
 854	char name_tx[IFNAMSIZ + 3];       /* -tx    */
 855	char name_other[IFNAMSIZ + 6];    /* -other */
 856};
 857
 858/*
 859 * Maximum number of loops until we assume that a bit in the irq mask
 860 * is stuck. Overridable with module param.
 861 */
 862static int max_interrupt_work = 4;
 863
 864/*
 865 * Optimization can be either throughput mode or cpu mode
 866 *
 867 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 868 * CPU Mode: Interrupts are controlled by a timer.
 869 */
 870enum {
 871	NV_OPTIMIZATION_MODE_THROUGHPUT,
 872	NV_OPTIMIZATION_MODE_CPU,
 873	NV_OPTIMIZATION_MODE_DYNAMIC
 874};
 875static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
 876
 877/*
 878 * Poll interval for timer irq
 879 *
 880 * This interval determines how frequently an interrupt is generated.
 881 * Its value is determined by [(time_in_micro_secs * 100) / (2^10)]
 882 * Min = 0, and Max = 65535
 883 */
 884static int poll_interval = -1;
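/*
 * Worked example: a 1 ms interval means time_in_micro_secs = 1000, so
 * (1000 * 100) / (2^10) = 100000 / 1024 ~= 97, the NVREG_POLL_DEFAULT
 * value noted earlier as giving a 1 ms interval.
 */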
 885
 886/*
 887 * MSI interrupts
 888 */
 889enum {
 890	NV_MSI_INT_DISABLED,
 891	NV_MSI_INT_ENABLED
 892};
 893static int msi = NV_MSI_INT_ENABLED;
 894
 895/*
 896 * MSIX interrupts
 897 */
 898enum {
 899	NV_MSIX_INT_DISABLED,
 900	NV_MSIX_INT_ENABLED
 901};
 902static int msix = NV_MSIX_INT_ENABLED;
 903
 904/*
 905 * DMA 64bit
 906 */
 907enum {
 908	NV_DMA_64BIT_DISABLED,
 909	NV_DMA_64BIT_ENABLED
 910};
 911static int dma_64bit = NV_DMA_64BIT_ENABLED;
 912
 913/*
 914 * Debug output control for tx_timeout
 915 */
 916static bool debug_tx_timeout = false;
 917
 918/*
 919 * Crossover Detection
 920 * Realtek 8201 phy + some OEM boards do not work properly.
 921 */
 922enum {
 923	NV_CROSSOVER_DETECTION_DISABLED,
 924	NV_CROSSOVER_DETECTION_ENABLED
 925};
 926static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
 927
 928/*
 929 * Power down phy when interface is down (persists through reboot;
 930 * older Linux and other OSes may not power it up again)
 931 */
 932static int phy_power_down;
 933
 934static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 935{
 936	return netdev_priv(dev);
 937}
 938
 939static inline u8 __iomem *get_hwbase(struct net_device *dev)
 940{
 941	return ((struct fe_priv *)netdev_priv(dev))->base;
 942}
 943
 944static inline void pci_push(u8 __iomem *base)
 945{
 946	/* force out pending posted writes */
 947	readl(base);
 948}
 949
 950static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 951{
 952	return le32_to_cpu(prd->flaglen)
 953		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 954}
 955
 956static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 957{
 958	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 959}
 960
 961static bool nv_optimized(struct fe_priv *np)
 962{
 963	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 964		return false;
 965	return true;
 966}
 967
 968static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 969		     int delay, int delaymax)
 970{
 971	u8 __iomem *base = get_hwbase(dev);
 972
 973	pci_push(base);
 974	do {
 975		udelay(delay);
 976		delaymax -= delay;
 977		if (delaymax < 0)
 978			return 1;
 979	} while ((readl(base + offset) & mask) != target);
 980	return 0;
 981}
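/*
 * Usage sketch (mirrors the rx/tx stop paths below): poll a status
 * register until the busy bit clears, or report a timeout:
 *
 *	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 *		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
 *		netdev_info(dev, "ReceiverStatus remained busy\n");
 */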
 982
 983#define NV_SETUP_RX_RING 0x01
 984#define NV_SETUP_TX_RING 0x02
 985
 986static inline u32 dma_low(dma_addr_t addr)
 987{
 988	return addr;
 989}
 990
 991static inline u32 dma_high(dma_addr_t addr)
 992{
 993	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
 994}
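/*
 * Example: for a 64-bit dma_addr_t of 0x0000000123456789, dma_low()
 * returns 0x23456789 and dma_high() returns 0x1. The two-step
 * ">>31>>1" avoids an undefined shift by 32 when dma_addr_t is only
 * 32 bits wide.
 */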
 995
 996static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 997{
 998	struct fe_priv *np = get_nvpriv(dev);
 999	u8 __iomem *base = get_hwbase(dev);
1000
1001	if (!nv_optimized(np)) {
1002		if (rxtx_flags & NV_SETUP_RX_RING)
1003			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1004		if (rxtx_flags & NV_SETUP_TX_RING)
1005			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
1006	} else {
1007		if (rxtx_flags & NV_SETUP_RX_RING) {
1008			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
1009			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
1010		}
1011		if (rxtx_flags & NV_SETUP_TX_RING) {
1012			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
1013			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
1014		}
1015	}
1016}
1017
1018static void free_rings(struct net_device *dev)
1019{
1020	struct fe_priv *np = get_nvpriv(dev);
1021
1022	if (!nv_optimized(np)) {
1023		if (np->rx_ring.orig)
1024			dma_free_coherent(&np->pci_dev->dev,
1025					  sizeof(struct ring_desc) *
1026					  (np->rx_ring_size +
1027					  np->tx_ring_size),
1028					  np->rx_ring.orig, np->ring_addr);
1029	} else {
1030		if (np->rx_ring.ex)
1031			dma_free_coherent(&np->pci_dev->dev,
1032					  sizeof(struct ring_desc_ex) *
1033					  (np->rx_ring_size +
1034					  np->tx_ring_size),
1035					  np->rx_ring.ex, np->ring_addr);
1036	}
1037	kfree(np->rx_skb);
1038	kfree(np->tx_skb);
1039}
1040
1041static int using_multi_irqs(struct net_device *dev)
1042{
1043	struct fe_priv *np = get_nvpriv(dev);
1044
1045	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
1046	    ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
1047		return 0;
1048	else
1049		return 1;
1050}
1051
1052static void nv_txrx_gate(struct net_device *dev, bool gate)
1053{
1054	struct fe_priv *np = get_nvpriv(dev);
1055	u8 __iomem *base = get_hwbase(dev);
1056	u32 powerstate;
1057
1058	if (!np->mac_in_use &&
1059	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
1060		powerstate = readl(base + NvRegPowerState2);
1061		if (gate)
1062			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
1063		else
1064			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
1065		writel(powerstate, base + NvRegPowerState2);
1066	}
1067}
1068
1069static void nv_enable_irq(struct net_device *dev)
1070{
1071	struct fe_priv *np = get_nvpriv(dev);
1072
1073	if (!using_multi_irqs(dev)) {
1074		if (np->msi_flags & NV_MSI_X_ENABLED)
1075			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1076		else
1077			enable_irq(np->pci_dev->irq);
1078	} else {
1079		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1080		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1081		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1082	}
1083}
1084
1085static void nv_disable_irq(struct net_device *dev)
1086{
1087	struct fe_priv *np = get_nvpriv(dev);
1088
1089	if (!using_multi_irqs(dev)) {
1090		if (np->msi_flags & NV_MSI_X_ENABLED)
1091			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1092		else
1093			disable_irq(np->pci_dev->irq);
1094	} else {
1095		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1096		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
1097		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
1098	}
1099}
1100
1101/* In MSIX mode, a write to irqmask behaves as XOR */
1102static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
1103{
1104	u8 __iomem *base = get_hwbase(dev);
1105
1106	writel(mask, base + NvRegIrqMask);
1107}
1108
1109static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1110{
1111	struct fe_priv *np = get_nvpriv(dev);
1112	u8 __iomem *base = get_hwbase(dev);
1113
1114	if (np->msi_flags & NV_MSI_X_ENABLED) {
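		/* XOR semantics (see the note above nv_enable_hw_interrupts):
		 * writing the currently enabled mask back toggles those bits
		 * off, disabling the interrupts.
		 */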
1115		writel(mask, base + NvRegIrqMask);
1116	} else {
1117		if (np->msi_flags & NV_MSI_ENABLED)
1118			writel(0, base + NvRegMSIIrqMask);
1119		writel(0, base + NvRegIrqMask);
1120	}
1121}
1122
1123static void nv_napi_enable(struct net_device *dev)
1124{
1125	struct fe_priv *np = get_nvpriv(dev);
1126
1127	napi_enable(&np->napi);
1128}
1129
1130static void nv_napi_disable(struct net_device *dev)
1131{
1132	struct fe_priv *np = get_nvpriv(dev);
1133
1134	napi_disable(&np->napi);
1135}
1136
1137#define MII_READ	(-1)
1138/* mii_rw: read/write a register on the PHY.
1139 *
1140 * Caller must guarantee serialization
1141 */
1142static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1143{
1144	u8 __iomem *base = get_hwbase(dev);
1145	u32 reg;
1146	int retval;
1147
1148	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
1149
1150	reg = readl(base + NvRegMIIControl);
1151	if (reg & NVREG_MIICTL_INUSE) {
1152		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
1153		udelay(NV_MIIBUSY_DELAY);
1154	}
1155
1156	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
1157	if (value != MII_READ) {
1158		writel(value, base + NvRegMIIData);
1159		reg |= NVREG_MIICTL_WRITE;
1160	}
1161	writel(reg, base + NvRegMIIControl);
1162
1163	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1164			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1165		retval = -1;
1166	} else if (value != MII_READ) {
1167		/* it was a write operation - fewer failures are detectable */
1168		retval = 0;
1169	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1170		retval = -1;
1171	} else {
1172		retval = readl(base + NvRegMIIData);
1173	}
1174
1175	return retval;
1176}
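/*
 * Usage sketch (hedged; this is how the driver calls mii_rw below):
 * pass MII_READ as the value to read a PHY register, or the new
 * contents to write one:
 *
 *	int bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 *	if (bmcr != -1)
 *		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr | BMCR_ANRESTART);
 */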
1177
1178static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1179{
1180	struct fe_priv *np = netdev_priv(dev);
1181	u32 miicontrol;
1182	unsigned int tries = 0;
1183
1184	miicontrol = BMCR_RESET | bmcr_setup;
1185	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1186		return -1;
1187
1188	/* wait for 500ms */
1189	msleep(500);
1190
1191	/* must wait till reset is deasserted */
1192	while (miicontrol & BMCR_RESET) {
1193		usleep_range(10000, 20000);
1194		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1195		/* FIXME: 100 tries seem excessive */
1196		if (tries++ > 100)
1197			return -1;
1198	}
1199	return 0;
1200}
1201
1202static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1203{
1204	static const struct {
1205		int reg;
1206		int init;
1207	} ri[] = {
1208		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1209		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1210		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1211		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1212		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1213		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1214		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1215	};
1216	int i;
1217
1218	for (i = 0; i < ARRAY_SIZE(ri); i++) {
1219		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1220			return PHY_ERROR;
1221	}
1222
1223	return 0;
1224}
1225
1226static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1227{
1228	u32 reg;
1229	u8 __iomem *base = get_hwbase(dev);
1230	u32 powerstate = readl(base + NvRegPowerState2);
1231
1232	/* need to perform hw phy reset */
1233	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1234	writel(powerstate, base + NvRegPowerState2);
1235	msleep(25);
1236
1237	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1238	writel(powerstate, base + NvRegPowerState2);
1239	msleep(25);
1240
1241	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1242	reg |= PHY_REALTEK_INIT9;
1243	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1244		return PHY_ERROR;
1245	if (mii_rw(dev, np->phyaddr,
1246		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1247		return PHY_ERROR;
1248	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1249	if (!(reg & PHY_REALTEK_INIT11)) {
1250		reg |= PHY_REALTEK_INIT11;
1251		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1252			return PHY_ERROR;
1253	}
1254	if (mii_rw(dev, np->phyaddr,
1255		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1256		return PHY_ERROR;
1257
1258	return 0;
1259}
1260
1261static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1262{
1263	u32 phy_reserved;
1264
1265	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1266		phy_reserved = mii_rw(dev, np->phyaddr,
1267				      PHY_REALTEK_INIT_REG6, MII_READ);
1268		phy_reserved |= PHY_REALTEK_INIT7;
1269		if (mii_rw(dev, np->phyaddr,
1270			   PHY_REALTEK_INIT_REG6, phy_reserved))
1271			return PHY_ERROR;
1272	}
1273
1274	return 0;
1275}
1276
1277static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1278{
1279	u32 phy_reserved;
1280
1281	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1282		if (mii_rw(dev, np->phyaddr,
1283			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1284			return PHY_ERROR;
1285		phy_reserved = mii_rw(dev, np->phyaddr,
1286				      PHY_REALTEK_INIT_REG2, MII_READ);
1287		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1288		phy_reserved |= PHY_REALTEK_INIT3;
1289		if (mii_rw(dev, np->phyaddr,
1290			   PHY_REALTEK_INIT_REG2, phy_reserved))
1291			return PHY_ERROR;
1292		if (mii_rw(dev, np->phyaddr,
1293			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1294			return PHY_ERROR;
1295	}
1296
1297	return 0;
1298}
1299
1300static int init_cicada(struct net_device *dev, struct fe_priv *np,
1301		       u32 phyinterface)
1302{
1303	u32 phy_reserved;
1304
1305	if (phyinterface & PHY_RGMII) {
1306		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1307		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1308		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1309		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1310			return PHY_ERROR;
1311		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1312		phy_reserved |= PHY_CICADA_INIT5;
1313		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1314			return PHY_ERROR;
1315	}
1316	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1317	phy_reserved |= PHY_CICADA_INIT6;
1318	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1319		return PHY_ERROR;
1320
1321	return 0;
1322}
1323
1324static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1325{
1326	u32 phy_reserved;
1327
1328	if (mii_rw(dev, np->phyaddr,
1329		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1330		return PHY_ERROR;
1331	if (mii_rw(dev, np->phyaddr,
1332		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1333		return PHY_ERROR;
1334	phy_reserved = mii_rw(dev, np->phyaddr,
1335			      PHY_VITESSE_INIT_REG4, MII_READ);
1336	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1337		return PHY_ERROR;
1338	phy_reserved = mii_rw(dev, np->phyaddr,
1339			      PHY_VITESSE_INIT_REG3, MII_READ);
1340	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1341	phy_reserved |= PHY_VITESSE_INIT3;
1342	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1343		return PHY_ERROR;
1344	if (mii_rw(dev, np->phyaddr,
1345		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1346		return PHY_ERROR;
1347	if (mii_rw(dev, np->phyaddr,
1348		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1349		return PHY_ERROR;
1350	phy_reserved = mii_rw(dev, np->phyaddr,
1351			      PHY_VITESSE_INIT_REG4, MII_READ);
1352	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1353	phy_reserved |= PHY_VITESSE_INIT3;
1354	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1355		return PHY_ERROR;
1356	phy_reserved = mii_rw(dev, np->phyaddr,
1357			      PHY_VITESSE_INIT_REG3, MII_READ);
1358	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1359		return PHY_ERROR;
1360	if (mii_rw(dev, np->phyaddr,
1361		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1362		return PHY_ERROR;
1363	if (mii_rw(dev, np->phyaddr,
1364		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1365		return PHY_ERROR;
1366	phy_reserved = mii_rw(dev, np->phyaddr,
1367			      PHY_VITESSE_INIT_REG4, MII_READ);
1368	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1369		return PHY_ERROR;
1370	phy_reserved = mii_rw(dev, np->phyaddr,
1371			      PHY_VITESSE_INIT_REG3, MII_READ);
1372	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1373	phy_reserved |= PHY_VITESSE_INIT8;
1374	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1375		return PHY_ERROR;
1376	if (mii_rw(dev, np->phyaddr,
1377		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1378		return PHY_ERROR;
1379	if (mii_rw(dev, np->phyaddr,
1380		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1381		return PHY_ERROR;
1382
1383	return 0;
1384}
1385
1386static int phy_init(struct net_device *dev)
1387{
1388	struct fe_priv *np = get_nvpriv(dev);
1389	u8 __iomem *base = get_hwbase(dev);
1390	u32 phyinterface;
1391	u32 mii_status, mii_control, mii_control_1000, reg;
1392
1393	/* phy errata for E3016 phy */
1394	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1395		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1396		reg &= ~PHY_MARVELL_E3016_INITMASK;
1397		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1398			netdev_info(dev, "%s: phy write to errata reg failed\n",
1399				    pci_name(np->pci_dev));
1400			return PHY_ERROR;
1401		}
1402	}
1403	if (np->phy_oui == PHY_OUI_REALTEK) {
1404		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1405		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1406			if (init_realtek_8211b(dev, np)) {
1407				netdev_info(dev, "%s: phy init failed\n",
1408					    pci_name(np->pci_dev));
1409				return PHY_ERROR;
1410			}
1411		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1412			   np->phy_rev == PHY_REV_REALTEK_8211C) {
1413			if (init_realtek_8211c(dev, np)) {
1414				netdev_info(dev, "%s: phy init failed\n",
1415					    pci_name(np->pci_dev));
1416				return PHY_ERROR;
1417			}
1418		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1419			if (init_realtek_8201(dev, np)) {
1420				netdev_info(dev, "%s: phy init failed\n",
1421					    pci_name(np->pci_dev));
1422				return PHY_ERROR;
1423			}
1424		}
1425	}
1426
1427	/* set advertise register */
1428	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1429	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1430		ADVERTISE_100HALF | ADVERTISE_100FULL |
1431		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1432	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1433		netdev_info(dev, "%s: phy write to advertise failed\n",
1434			    pci_name(np->pci_dev));
1435		return PHY_ERROR;
1436	}
1437
1438	/* get phy interface type */
1439	phyinterface = readl(base + NvRegPhyInterface);
1440
1441	/* see if gigabit phy */
1442	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1443	if (mii_status & PHY_GIGABIT) {
1444		np->gigabit = PHY_GIGABIT;
1445		mii_control_1000 = mii_rw(dev, np->phyaddr,
1446					  MII_CTRL1000, MII_READ);
1447		mii_control_1000 &= ~ADVERTISE_1000HALF;
1448		if (phyinterface & PHY_RGMII)
1449			mii_control_1000 |= ADVERTISE_1000FULL;
1450		else
1451			mii_control_1000 &= ~ADVERTISE_1000FULL;
1452
1453		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1454			netdev_info(dev, "%s: phy init failed\n",
1455				    pci_name(np->pci_dev));
1456			return PHY_ERROR;
1457		}
1458	} else
1459		np->gigabit = 0;
1460
1461	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1462	mii_control |= BMCR_ANENABLE;
1463
1464	if (np->phy_oui == PHY_OUI_REALTEK &&
1465	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
1466	    np->phy_rev == PHY_REV_REALTEK_8211C) {
1467		/* start autoneg since we already performed hw reset above */
1468		mii_control |= BMCR_ANRESTART;
1469		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1470			netdev_info(dev, "%s: phy init failed\n",
1471				    pci_name(np->pci_dev));
1472			return PHY_ERROR;
1473		}
1474	} else {
1475		/* reset the phy
1476		 * (certain phys need bmcr to be setup with reset)
1477		 */
1478		if (phy_reset(dev, mii_control)) {
1479			netdev_info(dev, "%s: phy reset failed\n",
1480				    pci_name(np->pci_dev));
1481			return PHY_ERROR;
1482		}
1483	}
1484
1485	/* phy vendor specific configuration */
1486	if (np->phy_oui == PHY_OUI_CICADA) {
1487		if (init_cicada(dev, np, phyinterface)) {
1488			netdev_info(dev, "%s: phy init failed\n",
1489				    pci_name(np->pci_dev));
1490			return PHY_ERROR;
1491		}
1492	} else if (np->phy_oui == PHY_OUI_VITESSE) {
1493		if (init_vitesse(dev, np)) {
1494			netdev_info(dev, "%s: phy init failed\n",
1495				    pci_name(np->pci_dev));
1496			return PHY_ERROR;
1497		}
1498	} else if (np->phy_oui == PHY_OUI_REALTEK) {
1499		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1500		    np->phy_rev == PHY_REV_REALTEK_8211B) {
1501			/* reset could have cleared these out, set them back */
1502			if (init_realtek_8211b(dev, np)) {
1503				netdev_info(dev, "%s: phy init failed\n",
1504					    pci_name(np->pci_dev));
1505				return PHY_ERROR;
1506			}
1507		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1508			if (init_realtek_8201(dev, np) ||
1509			    init_realtek_8201_cross(dev, np)) {
1510				netdev_info(dev, "%s: phy init failed\n",
1511					    pci_name(np->pci_dev));
1512				return PHY_ERROR;
1513			}
1514		}
1515	}
1516
1517	/* some phys clear out pause advertisement on reset, set it back */
1518	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1519
1520	/* restart auto negotiation, power down phy */
1521	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1522	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1523	if (phy_power_down)
1524		mii_control |= BMCR_PDOWN;
1525	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1526		return PHY_ERROR;
1527
1528	return 0;
1529}
1530
1531static void nv_start_rx(struct net_device *dev)
1532{
1533	struct fe_priv *np = netdev_priv(dev);
1534	u8 __iomem *base = get_hwbase(dev);
1535	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1536
1537	/* Already running? Stop it. */
1538	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1539		rx_ctrl &= ~NVREG_RCVCTL_START;
1540		writel(rx_ctrl, base + NvRegReceiverControl);
1541		pci_push(base);
1542	}
1543	writel(np->linkspeed, base + NvRegLinkSpeed);
1544	pci_push(base);
1545	rx_ctrl |= NVREG_RCVCTL_START;
1546	if (np->mac_in_use)
1547		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1548	writel(rx_ctrl, base + NvRegReceiverControl);
1549	pci_push(base);
1550}
1551
1552static void nv_stop_rx(struct net_device *dev)
1553{
1554	struct fe_priv *np = netdev_priv(dev);
1555	u8 __iomem *base = get_hwbase(dev);
1556	u32 rx_ctrl = readl(base + NvRegReceiverControl);
1557
1558	if (!np->mac_in_use)
1559		rx_ctrl &= ~NVREG_RCVCTL_START;
1560	else
1561		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1562	writel(rx_ctrl, base + NvRegReceiverControl);
1563	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1564		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1565		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1566			    __func__);
1567
1568	udelay(NV_RXSTOP_DELAY2);
1569	if (!np->mac_in_use)
1570		writel(0, base + NvRegLinkSpeed);
1571}
1572
1573static void nv_start_tx(struct net_device *dev)
1574{
1575	struct fe_priv *np = netdev_priv(dev);
1576	u8 __iomem *base = get_hwbase(dev);
1577	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578
1579	tx_ctrl |= NVREG_XMITCTL_START;
1580	if (np->mac_in_use)
1581		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1582	writel(tx_ctrl, base + NvRegTransmitterControl);
1583	pci_push(base);
1584}
1585
1586static void nv_stop_tx(struct net_device *dev)
1587{
1588	struct fe_priv *np = netdev_priv(dev);
1589	u8 __iomem *base = get_hwbase(dev);
1590	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1591
1592	if (!np->mac_in_use)
1593		tx_ctrl &= ~NVREG_XMITCTL_START;
1594	else
1595		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1596	writel(tx_ctrl, base + NvRegTransmitterControl);
1597	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1598		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1599		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1600			    __func__);
1601
1602	udelay(NV_TXSTOP_DELAY2);
1603	if (!np->mac_in_use)
1604		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1605		       base + NvRegTransmitPoll);
1606}
1607
1608static void nv_start_rxtx(struct net_device *dev)
1609{
1610	nv_start_rx(dev);
1611	nv_start_tx(dev);
1612}
1613
1614static void nv_stop_rxtx(struct net_device *dev)
1615{
1616	nv_stop_rx(dev);
1617	nv_stop_tx(dev);
1618}
1619
1620static void nv_txrx_reset(struct net_device *dev)
1621{
1622	struct fe_priv *np = netdev_priv(dev);
1623	u8 __iomem *base = get_hwbase(dev);
1624
1625	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1626	pci_push(base);
1627	udelay(NV_TXRX_RESET_DELAY);
1628	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1629	pci_push(base);
1630}
1631
1632static void nv_mac_reset(struct net_device *dev)
1633{
1634	struct fe_priv *np = netdev_priv(dev);
1635	u8 __iomem *base = get_hwbase(dev);
1636	u32 temp1, temp2, temp3;
1637
1638	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1639	pci_push(base);
1640
1641	/* save registers since they will be cleared on reset */
1642	temp1 = readl(base + NvRegMacAddrA);
1643	temp2 = readl(base + NvRegMacAddrB);
1644	temp3 = readl(base + NvRegTransmitPoll);
1645
1646	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
1647	pci_push(base);
1648	udelay(NV_MAC_RESET_DELAY);
1649	writel(0, base + NvRegMacReset);
1650	pci_push(base);
1651	udelay(NV_MAC_RESET_DELAY);
1652
1653	/* restore saved registers */
1654	writel(temp1, base + NvRegMacAddrA);
1655	writel(temp2, base + NvRegMacAddrB);
1656	writel(temp3, base + NvRegTransmitPoll);
1657
1658	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
1659	pci_push(base);
1660}
1661
1662/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
1663static void nv_update_stats(struct net_device *dev)
1664{
1665	struct fe_priv *np = netdev_priv(dev);
1666	u8 __iomem *base = get_hwbase(dev);
1667
1668	lockdep_assert_held(&np->hwstats_lock);
1669
1670	/* query hardware */
1671	np->estats.tx_bytes += readl(base + NvRegTxCnt);
1672	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
1673	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
1674	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
1675	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
1676	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
1677	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
1678	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
1679	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
1680	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
1681	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
1682	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
1683	np->estats.rx_runt += readl(base + NvRegRxRunt);
1684	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
1685	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
1686	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
1687	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
1688	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
1689	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
1690	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
1691	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
1692	np->estats.rx_packets =
1693		np->estats.rx_unicast +
1694		np->estats.rx_multicast +
1695		np->estats.rx_broadcast;
1696	np->estats.rx_errors_total =
1697		np->estats.rx_crc_errors +
1698		np->estats.rx_over_errors +
1699		np->estats.rx_frame_error +
1700		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
1701		np->estats.rx_late_collision +
1702		np->estats.rx_runt +
1703		np->estats.rx_frame_too_long;
1704	np->estats.tx_errors_total =
1705		np->estats.tx_late_collision +
1706		np->estats.tx_fifo_errors +
1707		np->estats.tx_carrier_errors +
1708		np->estats.tx_excess_deferral +
1709		np->estats.tx_retry_error;
1710
1711	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
1712		np->estats.tx_deferral += readl(base + NvRegTxDef);
1713		np->estats.tx_packets += readl(base + NvRegTxFrame);
1714		np->estats.rx_bytes += readl(base + NvRegRxCnt);
1715		np->estats.tx_pause += readl(base + NvRegTxPause);
1716		np->estats.rx_pause += readl(base + NvRegRxPause);
1717		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
1718		np->estats.rx_errors_total += np->estats.rx_drop_frame;
1719	}
1720
1721	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
1722		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
1723		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
1724		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
1725	}
1726}
1727
1728static void nv_get_stats(int cpu, struct fe_priv *np,
1729			 struct rtnl_link_stats64 *storage)
1730{
1731	struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
1732	unsigned int syncp_start;
1733	u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
1734	u64 tx_packets, tx_bytes, tx_dropped;
1735
1736	do {
1737		syncp_start = u64_stats_fetch_begin(&np->swstats_rx_syncp);
1738		rx_packets       = src->stat_rx_packets;
1739		rx_bytes         = src->stat_rx_bytes;
1740		rx_dropped       = src->stat_rx_dropped;
1741		rx_missed_errors = src->stat_rx_missed_errors;
1742	} while (u64_stats_fetch_retry(&np->swstats_rx_syncp, syncp_start));
1743
1744	storage->rx_packets       += rx_packets;
1745	storage->rx_bytes         += rx_bytes;
1746	storage->rx_dropped       += rx_dropped;
1747	storage->rx_missed_errors += rx_missed_errors;
1748
1749	do {
1750		syncp_start = u64_stats_fetch_begin(&np->swstats_tx_syncp);
1751		tx_packets  = src->stat_tx_packets;
1752		tx_bytes    = src->stat_tx_bytes;
1753		tx_dropped  = src->stat_tx_dropped;
1754	} while (u64_stats_fetch_retry(&np->swstats_tx_syncp, syncp_start));
1755
1756	storage->tx_packets += tx_packets;
1757	storage->tx_bytes   += tx_bytes;
1758	storage->tx_dropped += tx_dropped;
1759}
1760
1761/*
1762 * nv_get_stats64: dev->ndo_get_stats64 function
1763 * Get latest stats value from the nic.
1764 * Called with read_lock(&dev_base_lock) held for read -
1765 * only synchronized against unregister_netdevice.
1766 */
1767static void
1768nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1769	__acquires(&netdev_priv(dev)->hwstats_lock)
1770	__releases(&netdev_priv(dev)->hwstats_lock)
1771{
1772	struct fe_priv *np = netdev_priv(dev);
1773	int cpu;
1774
1775	/*
1776	 * Note: because HW stats are not always available and for
1777	 * consistency reasons, the following ifconfig stats are
1778	 * managed by software: rx_bytes, tx_bytes, rx_packets and
1779	 * tx_packets. The related hardware stats reported by ethtool
1780	 * should be equivalent to these ifconfig stats, with 4
1781	 * additional bytes per packet (Ethernet FCS CRC), except for
1782	 * tx_packets when TSO kicks in.
1783	 */
1784
1785	/* software stats */
1786	for_each_online_cpu(cpu)
1787		nv_get_stats(cpu, np, storage);
1788
1789	/* If the nic supports hw counters then retrieve latest values */
1790	if (np->driver_data & DEV_HAS_STATISTICS_V123) {
1791		spin_lock_bh(&np->hwstats_lock);
1792
1793		nv_update_stats(dev);
1794
1795		/* generic stats */
1796		storage->rx_errors = np->estats.rx_errors_total;
1797		storage->tx_errors = np->estats.tx_errors_total;
1798
1799		/* meaningful only when NIC supports stats v3 */
1800		storage->multicast = np->estats.rx_multicast;
1801
1802		/* detailed rx_errors */
1803		storage->rx_length_errors = np->estats.rx_length_error;
1804		storage->rx_over_errors   = np->estats.rx_over_errors;
1805		storage->rx_crc_errors    = np->estats.rx_crc_errors;
1806		storage->rx_frame_errors  = np->estats.rx_frame_align_error;
1807		storage->rx_fifo_errors   = np->estats.rx_drop_frame;
1808
1809		/* detailed tx_errors */
1810		storage->tx_carrier_errors = np->estats.tx_carrier_errors;
1811		storage->tx_fifo_errors    = np->estats.tx_fifo_errors;
1812
1813		spin_unlock_bh(&np->hwstats_lock);
1814	}
1815}
1816
1817/*
1818 * nv_alloc_rx: fill rx ring entries.
1819 * Return 1 if the skb allocations failed and the
1820 * rx engine is left without available descriptors
1821 */
1822static int nv_alloc_rx(struct net_device *dev)
1823{
1824	struct fe_priv *np = netdev_priv(dev);
1825	struct ring_desc *less_rx;
1826
1827	less_rx = np->get_rx.orig;
1828	if (less_rx-- == np->rx_ring.orig)
1829		less_rx = np->last_rx.orig;
1830
1831	while (np->put_rx.orig != less_rx) {
1832		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1833		if (likely(skb)) {
1834			np->put_rx_ctx->skb = skb;
1835			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1836							     skb->data,
1837							     skb_tailroom(skb),
1838							     DMA_FROM_DEVICE);
1839			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1840						       np->put_rx_ctx->dma))) {
1841				kfree_skb(skb);
1842				goto packet_dropped;
1843			}
1844			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1845			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1846			wmb();
1847			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1848			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1849				np->put_rx.orig = np->rx_ring.orig;
1850			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1851				np->put_rx_ctx = np->rx_skb;
1852		} else {
1853packet_dropped:
1854			u64_stats_update_begin(&np->swstats_rx_syncp);
1855			nv_txrx_stats_inc(stat_rx_dropped);
1856			u64_stats_update_end(&np->swstats_rx_syncp);
1857			return 1;
1858		}
1859	}
1860	return 0;
1861}
1862
1863static int nv_alloc_rx_optimized(struct net_device *dev)
1864{
1865	struct fe_priv *np = netdev_priv(dev);
1866	struct ring_desc_ex *less_rx;
1867
1868	less_rx = np->get_rx.ex;
1869	if (less_rx-- == np->rx_ring.ex)
1870		less_rx = np->last_rx.ex;
1871
1872	while (np->put_rx.ex != less_rx) {
1873		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
1874		if (likely(skb)) {
1875			np->put_rx_ctx->skb = skb;
1876			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
1877							     skb->data,
1878							     skb_tailroom(skb),
1879							     DMA_FROM_DEVICE);
1880			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
1881						       np->put_rx_ctx->dma))) {
1882				kfree_skb(skb);
1883				goto packet_dropped;
1884			}
1885			np->put_rx_ctx->dma_len = skb_tailroom(skb);
1886			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1887			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1888			wmb();
1889			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1890			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1891				np->put_rx.ex = np->rx_ring.ex;
1892			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1893				np->put_rx_ctx = np->rx_skb;
1894		} else {
1895packet_dropped:
1896			u64_stats_update_begin(&np->swstats_rx_syncp);
1897			nv_txrx_stats_inc(stat_rx_dropped);
1898			u64_stats_update_end(&np->swstats_rx_syncp);
1899			return 1;
1900		}
1901	}
1902	return 0;
1903}
1904
1905/* If rx bufs are exhausted, this is called after 50ms to attempt a refill */
1906static void nv_do_rx_refill(struct timer_list *t)
1907{
1908	struct fe_priv *np = from_timer(np, t, oom_kick);
1909
1910	/* Just reschedule NAPI rx processing */
1911	napi_schedule(&np->napi);
1912}
1913
1914static void nv_init_rx(struct net_device *dev)
1915{
1916	struct fe_priv *np = netdev_priv(dev);
1917	int i;
1918
1919	np->get_rx = np->rx_ring;
1920	np->put_rx = np->rx_ring;
1921
1922	if (!nv_optimized(np))
1923		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1924	else
1925		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1926	np->get_rx_ctx = np->rx_skb;
1927	np->put_rx_ctx = np->rx_skb;
1928	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1929
1930	for (i = 0; i < np->rx_ring_size; i++) {
1931		if (!nv_optimized(np)) {
1932			np->rx_ring.orig[i].flaglen = 0;
1933			np->rx_ring.orig[i].buf = 0;
1934		} else {
1935			np->rx_ring.ex[i].flaglen = 0;
1936			np->rx_ring.ex[i].txvlan = 0;
1937			np->rx_ring.ex[i].bufhigh = 0;
1938			np->rx_ring.ex[i].buflow = 0;
1939		}
1940		np->rx_skb[i].skb = NULL;
1941		np->rx_skb[i].dma = 0;
1942	}
1943}
1944
1945static void nv_init_tx(struct net_device *dev)
1946{
1947	struct fe_priv *np = netdev_priv(dev);
1948	int i;
1949
1950	np->get_tx = np->tx_ring;
1951	np->put_tx = np->tx_ring;
1952
1953	if (!nv_optimized(np))
1954		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1955	else
1956		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1957	np->get_tx_ctx = np->tx_skb;
1958	np->put_tx_ctx = np->tx_skb;
1959	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1960	netdev_reset_queue(np->dev);
1961	np->tx_pkts_in_progress = 0;
1962	np->tx_change_owner = NULL;
1963	np->tx_end_flip = NULL;
1964	np->tx_stop = 0;
1965
1966	for (i = 0; i < np->tx_ring_size; i++) {
1967		if (!nv_optimized(np)) {
1968			np->tx_ring.orig[i].flaglen = 0;
1969			np->tx_ring.orig[i].buf = 0;
1970		} else {
1971			np->tx_ring.ex[i].flaglen = 0;
1972			np->tx_ring.ex[i].txvlan = 0;
1973			np->tx_ring.ex[i].bufhigh = 0;
1974			np->tx_ring.ex[i].buflow = 0;
1975		}
1976		np->tx_skb[i].skb = NULL;
1977		np->tx_skb[i].dma = 0;
1978		np->tx_skb[i].dma_len = 0;
1979		np->tx_skb[i].dma_single = 0;
1980		np->tx_skb[i].first_tx_desc = NULL;
1981		np->tx_skb[i].next_tx_ctx = NULL;
1982	}
1983}
1984
1985static int nv_init_ring(struct net_device *dev)
1986{
1987	struct fe_priv *np = netdev_priv(dev);
1988
1989	nv_init_tx(dev);
1990	nv_init_rx(dev);
1991
1992	if (!nv_optimized(np))
1993		return nv_alloc_rx(dev);
1994	else
1995		return nv_alloc_rx_optimized(dev);
1996}
1997
1998static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1999{
2000	if (tx_skb->dma) {
2001		if (tx_skb->dma_single)
2002			dma_unmap_single(&np->pci_dev->dev, tx_skb->dma,
2003					 tx_skb->dma_len,
2004					 DMA_TO_DEVICE);
2005		else
2006			dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
2007				       tx_skb->dma_len,
2008				       DMA_TO_DEVICE);
2009		tx_skb->dma = 0;
2010	}
2011}
2012
2013static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
2014{
2015	nv_unmap_txskb(np, tx_skb);
2016	if (tx_skb->skb) {
2017		dev_kfree_skb_any(tx_skb->skb);
2018		tx_skb->skb = NULL;
2019		return 1;
2020	}
2021	return 0;
2022}
2023
2024static void nv_drain_tx(struct net_device *dev)
2025{
2026	struct fe_priv *np = netdev_priv(dev);
2027	unsigned int i;
2028
2029	for (i = 0; i < np->tx_ring_size; i++) {
2030		if (!nv_optimized(np)) {
2031			np->tx_ring.orig[i].flaglen = 0;
2032			np->tx_ring.orig[i].buf = 0;
2033		} else {
2034			np->tx_ring.ex[i].flaglen = 0;
2035			np->tx_ring.ex[i].txvlan = 0;
2036			np->tx_ring.ex[i].bufhigh = 0;
2037			np->tx_ring.ex[i].buflow = 0;
2038		}
2039		if (nv_release_txskb(np, &np->tx_skb[i])) {
2040			u64_stats_update_begin(&np->swstats_tx_syncp);
2041			nv_txrx_stats_inc(stat_tx_dropped);
2042			u64_stats_update_end(&np->swstats_tx_syncp);
2043		}
2044		np->tx_skb[i].dma = 0;
2045		np->tx_skb[i].dma_len = 0;
2046		np->tx_skb[i].dma_single = 0;
2047		np->tx_skb[i].first_tx_desc = NULL;
2048		np->tx_skb[i].next_tx_ctx = NULL;
2049	}
2050	np->tx_pkts_in_progress = 0;
2051	np->tx_change_owner = NULL;
2052	np->tx_end_flip = NULL;
2053}
2054
2055static void nv_drain_rx(struct net_device *dev)
2056{
2057	struct fe_priv *np = netdev_priv(dev);
2058	int i;
2059
2060	for (i = 0; i < np->rx_ring_size; i++) {
2061		if (!nv_optimized(np)) {
2062			np->rx_ring.orig[i].flaglen = 0;
2063			np->rx_ring.orig[i].buf = 0;
2064		} else {
2065			np->rx_ring.ex[i].flaglen = 0;
2066			np->rx_ring.ex[i].txvlan = 0;
2067			np->rx_ring.ex[i].bufhigh = 0;
2068			np->rx_ring.ex[i].buflow = 0;
2069		}
2070		wmb();
2071		if (np->rx_skb[i].skb) {
2072			dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma,
2073					 (skb_end_pointer(np->rx_skb[i].skb) -
2074					 np->rx_skb[i].skb->data),
2075					 DMA_FROM_DEVICE);
2076			dev_kfree_skb(np->rx_skb[i].skb);
2077			np->rx_skb[i].skb = NULL;
2078		}
2079	}
2080}
2081
2082static void nv_drain_rxtx(struct net_device *dev)
2083{
2084	nv_drain_tx(dev);
2085	nv_drain_rx(dev);
2086}
2087
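/*
 * nv_get_empty_tx_slots: number of free tx descriptors.
 * put_tx_ctx - get_tx_ctx is the number of in-flight slots, taken
 * modulo the ring size to handle pointer wraparound. Worked example
 * with tx_ring_size = 16: put - get = 5 gives (16 + 5) % 16 = 5 slots
 * used, 11 empty; after a wraparound, put - get = -3 gives
 * (16 - 3) % 16 = 13 used, 3 empty.
 */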
2088static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
2089{
2090	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
2091}
2092
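/*
 * nv_legacybackoff_reseed: write a fresh random seed into the low bits
 * of NvRegSlotTime, which seed the half-duplex collision backoff logic.
 * The transmitter (and receiver) is briefly stopped so that the new
 * seed takes effect.
 */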
2093static void nv_legacybackoff_reseed(struct net_device *dev)
2094{
2095	u8 __iomem *base = get_hwbase(dev);
2096	u32 reg;
2097	u32 low;
2098	int tx_status = 0;
2099
2100	reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
2101	get_random_bytes(&low, sizeof(low));
2102	reg |= low & NVREG_SLOTTIME_MASK;
2103
2104	/* Need to stop tx before change takes effect.
2105	 * Caller has already acquired np->lock.
2106	 */
2107	tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
2108	if (tx_status)
2109		nv_stop_tx(dev);
2110	nv_stop_rx(dev);
2111	writel(reg, base + NvRegSlotTime);
2112	if (tx_status)
2113		nv_start_tx(dev);
2114	nv_start_rx(dev);
2115}
2116
2117/* Gear Backoff Seeds */
2118#define BACKOFF_SEEDSET_ROWS	8
2119#define BACKOFF_SEEDSET_LFSRS	15
2120
2121/* Known Good seed sets */
2122static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2123	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2124	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2125	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2126	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2127	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2128	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2129	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
2130	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2131
2132static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2133	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2134	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2135	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2136	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2137	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
2138	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2139	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2140	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2141
2142static void nv_gear_backoff_reseed(struct net_device *dev)
2143{
2144	u8 __iomem *base = get_hwbase(dev);
2145	u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2146	u32 temp, seedset, combinedSeed;
2147	int i;
2148
2149	/* Setup seed for free running LFSR */
2150	/* We take three random 12-bit values and swizzle their bits
2151	   around to increase randomness */
2152	get_random_bytes(&miniseed1, sizeof(miniseed1));
2153	miniseed1 &= 0x0fff;
2154	if (miniseed1 == 0)
2155		miniseed1 = 0xabc;
2156
2157	get_random_bytes(&miniseed2, sizeof(miniseed2));
2158	miniseed2 &= 0x0fff;
2159	if (miniseed2 == 0)
2160		miniseed2 = 0xabc;
2161	miniseed2_reversed =
2162		((miniseed2 & 0xF00) >> 8) |
2163		 (miniseed2 & 0x0F0) |
2164		 ((miniseed2 & 0x00F) << 8);
2165
2166	get_random_bytes(&miniseed3, sizeof(miniseed3));
2167	miniseed3 &= 0x0fff;
2168	if (miniseed3 == 0)
2169		miniseed3 = 0xabc;
2170	miniseed3_reversed =
2171		((miniseed3 & 0xF00) >> 8) |
2172		 (miniseed3 & 0x0F0) |
2173		 ((miniseed3 & 0x00F) << 8);
2174
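	/* Combine the three 12-bit miniseeds into one 24-bit seed: the
	 * upper 12 bits are miniseed1 ^ miniseed2_reversed, the lower 12
	 * bits are miniseed2 ^ miniseed3_reversed. Each *_reversed value
	 * swaps the top and bottom nibbles of its miniseed (e.g. 0xabc
	 * becomes 0xcba), decorrelating the two halves.
	 */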
2175	combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2176		       (miniseed2 ^ miniseed3_reversed);
2177
2178	/* Seeds can not be zero */
2179	if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2180		combinedSeed |= 0x08;
2181	if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2182		combinedSeed |= 0x8000;
2183
2184	/* No need to disable tx here */
2185	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2186	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2187	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2188	writel(temp, base + NvRegBackOffControl);
2189
2190	/* Setup seeds for all gear LFSRs. */
2191	get_random_bytes(&seedset, sizeof(seedset));
2192	seedset = seedset % BACKOFF_SEEDSET_ROWS;
2193	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2194		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2195		temp |= main_seedset[seedset][i-1] & 0x3ff;
2196		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2197		writel(temp, base + NvRegBackOffControl);
2198	}
2199}
2200
2201/*
2202 * nv_start_xmit: dev->hard_start_xmit function
2203 * Called with netif_tx_lock held.
2204 */
2205static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2206{
2207	struct fe_priv *np = netdev_priv(dev);
2208	u32 tx_flags = 0;
2209	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2210	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2211	unsigned int i;
2212	u32 offset = 0;
2213	u32 bcnt;
2214	u32 size = skb_headlen(skb);
2215	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2216	u32 empty_slots;
2217	struct ring_desc *put_tx;
2218	struct ring_desc *start_tx;
2219	struct ring_desc *prev_tx;
2220	struct nv_skb_map *prev_tx_ctx;
2221	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
2222	unsigned long flags;
2223	netdev_tx_t ret = NETDEV_TX_OK;
2224
2225	/* add fragments to entries count */
2226	for (i = 0; i < fragments; i++) {
2227		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2228
2229		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2230			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2231	}
2232
2233	spin_lock_irqsave(&np->lock, flags);
2234	empty_slots = nv_get_empty_tx_slots(np);
2235	if (unlikely(empty_slots <= entries)) {
2236		netif_stop_queue(dev);
2237		np->tx_stop = 1;
2238		spin_unlock_irqrestore(&np->lock, flags);
2239
2240		/* When normal and/or xmit_more packets have filled up the
2241		 * tx descriptors, the NIC tx register must still be kicked.
2242		 */
2243		ret = NETDEV_TX_BUSY;
2244		goto txkick;
2245	}
2246	spin_unlock_irqrestore(&np->lock, flags);
2247
2248	start_tx = put_tx = np->put_tx.orig;
2249
2250	/* setup the header buffer */
2251	do {
2252		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2253		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2254						     skb->data + offset, bcnt,
2255						     DMA_TO_DEVICE);
2256		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2257					       np->put_tx_ctx->dma))) {
2258			/* on DMA mapping error - drop the packet */
2259			dev_kfree_skb_any(skb);
2260			u64_stats_update_begin(&np->swstats_tx_syncp);
2261			nv_txrx_stats_inc(stat_tx_dropped);
2262			u64_stats_update_end(&np->swstats_tx_syncp);
2263
2264			ret = NETDEV_TX_OK;
2265
2266			goto dma_error;
2267		}
2268		np->put_tx_ctx->dma_len = bcnt;
2269		np->put_tx_ctx->dma_single = 1;
2270		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2271		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2272
2273		tx_flags = np->tx_flags;
2274		offset += bcnt;
2275		size -= bcnt;
2276		if (unlikely(put_tx++ == np->last_tx.orig))
2277			put_tx = np->tx_ring.orig;
2278		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2279			np->put_tx_ctx = np->tx_skb;
2280	} while (size);
2281
2282	/* setup the fragments */
2283	for (i = 0; i < fragments; i++) {
2284		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2285		u32 frag_size = skb_frag_size(frag);
2286		offset = 0;
2287
2288		do {
2289			if (!start_tx_ctx)
2290				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2291
2292			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2293			np->put_tx_ctx->dma = skb_frag_dma_map(
2294							&np->pci_dev->dev,
2295							frag, offset,
2296							bcnt,
2297							DMA_TO_DEVICE);
2298			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2299						       np->put_tx_ctx->dma))) {
2300
2301				/* Unwind the mapped fragments */
2302				do {
2303					nv_unmap_txskb(np, start_tx_ctx);
2304					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2305						tmp_tx_ctx = np->tx_skb;
2306				} while (tmp_tx_ctx != np->put_tx_ctx);
2307				dev_kfree_skb_any(skb);
2308				np->put_tx_ctx = start_tx_ctx;
2309				u64_stats_update_begin(&np->swstats_tx_syncp);
2310				nv_txrx_stats_inc(stat_tx_dropped);
2311				u64_stats_update_end(&np->swstats_tx_syncp);
2312
2313				ret = NETDEV_TX_OK;
2314
2315				goto dma_error;
2316			}
2317
2318			np->put_tx_ctx->dma_len = bcnt;
2319			np->put_tx_ctx->dma_single = 0;
2320			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2321			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2322
2323			offset += bcnt;
2324			frag_size -= bcnt;
2325			if (unlikely(put_tx++ == np->last_tx.orig))
2326				put_tx = np->tx_ring.orig;
2327			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2328				np->put_tx_ctx = np->tx_skb;
2329		} while (frag_size);
2330	}
2331
2332	if (unlikely(put_tx == np->tx_ring.orig))
2333		prev_tx = np->last_tx.orig;
2334	else
2335		prev_tx = put_tx - 1;
2336
2337	if (unlikely(np->put_tx_ctx == np->tx_skb))
2338		prev_tx_ctx = np->last_tx_ctx;
2339	else
2340		prev_tx_ctx = np->put_tx_ctx - 1;
2341
2342	/* set last fragment flag  */
2343	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2344
2345	/* save skb in this slot's context area */
2346	prev_tx_ctx->skb = skb;
2347
2348	if (skb_is_gso(skb))
2349		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2350	else
2351		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2352			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2353
2354	spin_lock_irqsave(&np->lock, flags);
2355
2356	/* set tx flags */
2357	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2358
2359	netdev_sent_queue(np->dev, skb->len);
2360
2361	skb_tx_timestamp(skb);
2362
2363	np->put_tx.orig = put_tx;
2364
2365	spin_unlock_irqrestore(&np->lock, flags);
2366
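	/* Note: the dma_error label below lands inside this conditional,
	 * so a dropped packet still kicks the NIC and flushes any packets
	 * previously queued via xmit_more.
	 */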
2367txkick:
2368	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
2369		u32 txrxctl_kick;
2370dma_error:
2371		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
2372		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2373	}
2374
2375	return ret;
2376}
2377
2378static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2379					   struct net_device *dev)
2380{
2381	struct fe_priv *np = netdev_priv(dev);
2382	u32 tx_flags = 0;
2383	u32 tx_flags_extra;
2384	unsigned int fragments = skb_shinfo(skb)->nr_frags;
2385	unsigned int i;
2386	u32 offset = 0;
2387	u32 bcnt;
2388	u32 size = skb_headlen(skb);
2389	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2390	u32 empty_slots;
2391	struct ring_desc_ex *put_tx;
2392	struct ring_desc_ex *start_tx;
2393	struct ring_desc_ex *prev_tx;
2394	struct nv_skb_map *prev_tx_ctx;
2395	struct nv_skb_map *start_tx_ctx = NULL;
2396	struct nv_skb_map *tmp_tx_ctx = NULL;
2397	unsigned long flags;
2398	netdev_tx_t ret = NETDEV_TX_OK;
2399
2400	/* add fragments to entries count */
2401	for (i = 0; i < fragments; i++) {
2402		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2403
2404		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2405			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2406	}
2407
2408	spin_lock_irqsave(&np->lock, flags);
2409	empty_slots = nv_get_empty_tx_slots(np);
2410	if (unlikely(empty_slots <= entries)) {
2411		netif_stop_queue(dev);
2412		np->tx_stop = 1;
2413		spin_unlock_irqrestore(&np->lock, flags);
2414
2415		/* When normal and/or xmit_more packets have filled up the
2416		 * tx descriptors, the NIC tx register must still be kicked.
2417		 */
2418		ret = NETDEV_TX_BUSY;
2419
2420		goto txkick;
2421	}
2422	spin_unlock_irqrestore(&np->lock, flags);
2423
2424	start_tx = put_tx = np->put_tx.ex;
2425	start_tx_ctx = np->put_tx_ctx;
2426
2427	/* setup the header buffer */
2428	do {
2429		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2430		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
2431						     skb->data + offset, bcnt,
2432						     DMA_TO_DEVICE);
2433		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2434					       np->put_tx_ctx->dma))) {
2435			/* on DMA mapping error - drop the packet */
2436			dev_kfree_skb_any(skb);
2437			u64_stats_update_begin(&np->swstats_tx_syncp);
2438			nv_txrx_stats_inc(stat_tx_dropped);
2439			u64_stats_update_end(&np->swstats_tx_syncp);
2440
2441			ret = NETDEV_TX_OK;
2442
2443			goto dma_error;
2444		}
2445		np->put_tx_ctx->dma_len = bcnt;
2446		np->put_tx_ctx->dma_single = 1;
2447		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2448		put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2449		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2450
2451		tx_flags = NV_TX2_VALID;
2452		offset += bcnt;
2453		size -= bcnt;
2454		if (unlikely(put_tx++ == np->last_tx.ex))
2455			put_tx = np->tx_ring.ex;
2456		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2457			np->put_tx_ctx = np->tx_skb;
2458	} while (size);
2459
2460	/* setup the fragments */
2461	for (i = 0; i < fragments; i++) {
2462		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2463		u32 frag_size = skb_frag_size(frag);
2464		offset = 0;
2465
2466		do {
2467			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2468			if (!start_tx_ctx)
2469				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
2470			np->put_tx_ctx->dma = skb_frag_dma_map(
2471							&np->pci_dev->dev,
2472							frag, offset,
2473							bcnt,
2474							DMA_TO_DEVICE);
2475
2476			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
2477						       np->put_tx_ctx->dma))) {
2478
2479				/* Unwind the mapped fragments */
2480				do {
2481					nv_unmap_txskb(np, start_tx_ctx);
2482					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2483						tmp_tx_ctx = np->tx_skb;
2484				} while (tmp_tx_ctx != np->put_tx_ctx);
2485				dev_kfree_skb_any(skb);
2486				np->put_tx_ctx = start_tx_ctx;
2487				u64_stats_update_begin(&np->swstats_tx_syncp);
2488				nv_txrx_stats_inc(stat_tx_dropped);
2489				u64_stats_update_end(&np->swstats_tx_syncp);
2490
2491				ret = NETDEV_TX_OK;
2492
2493				goto dma_error;
2494			}
2495			np->put_tx_ctx->dma_len = bcnt;
2496			np->put_tx_ctx->dma_single = 0;
2497			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2498			put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2499			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2500
2501			offset += bcnt;
2502			frag_size -= bcnt;
2503			if (unlikely(put_tx++ == np->last_tx.ex))
2504				put_tx = np->tx_ring.ex;
2505			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2506				np->put_tx_ctx = np->tx_skb;
2507		} while (frag_size);
2508	}
2509
2510	if (unlikely(put_tx == np->tx_ring.ex))
2511		prev_tx = np->last_tx.ex;
2512	else
2513		prev_tx = put_tx - 1;
2514
2515	if (unlikely(np->put_tx_ctx == np->tx_skb))
2516		prev_tx_ctx = np->last_tx_ctx;
2517	else
2518		prev_tx_ctx = np->put_tx_ctx - 1;
2519
2520	/* set last fragment flag  */
2521	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2522
2523	/* save skb in this slot's context area */
2524	prev_tx_ctx->skb = skb;
2525
2526	if (skb_is_gso(skb))
2527		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2528	else
2529		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2530			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2531
2532	/* vlan tag */
2533	if (skb_vlan_tag_present(skb))
2534		start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2535					skb_vlan_tag_get(skb));
2536	else
2537		start_tx->txvlan = 0;
2538
2539	spin_lock_irqsave(&np->lock, flags);
2540
2541	if (np->tx_limit) {
2542		/* Limit the number of outstanding tx. Setup all fragments, but
2543		 * do not set the VALID bit on the first descriptor. Save a pointer
2544		 * to that descriptor and also for next skb_map element.
2545		 */
2546
2547		if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2548			if (!np->tx_change_owner)
2549				np->tx_change_owner = start_tx_ctx;
2550
2551			/* remove VALID bit */
2552			tx_flags &= ~NV_TX2_VALID;
2553			start_tx_ctx->first_tx_desc = start_tx;
2554			start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2555			np->tx_end_flip = np->put_tx_ctx;
2556		} else {
2557			np->tx_pkts_in_progress++;
2558		}
2559	}
2560
2561	/* set tx flags */
2562	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2563
2564	netdev_sent_queue(np->dev, skb->len);
2565
2566	skb_tx_timestamp(skb);
2567
2568	np->put_tx.ex = put_tx;
2569
2570	spin_unlock_irqrestore(&np->lock, flags);
2571
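	/* As in nv_start_xmit: dma_error lands inside this conditional, so
	 * a dropped packet still kicks the NIC for earlier xmit_more work.
	 */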
2572txkick:
2573	if (netif_queue_stopped(dev) || !netdev_xmit_more()) {
2574		u32 txrxctl_kick;
2575dma_error:
2576		txrxctl_kick = NVREG_TXRXCTL_KICK | np->txrxctl_bits;
2577		writel(txrxctl_kick, get_hwbase(dev) + NvRegTxRxControl);
2578	}
2579
2580	return ret;
2581}
2582
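/*
 * nv_tx_flip_ownership: hand the oldest deferred packet to the hardware.
 * With tx_limit active, packets beyond NV_TX_LIMIT_COUNT were queued
 * with the VALID bit cleared on their first descriptor; each time a
 * packet completes, the next deferred packet has its VALID bit set and
 * the NIC is kicked.
 */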
2583static inline void nv_tx_flip_ownership(struct net_device *dev)
2584{
2585	struct fe_priv *np = netdev_priv(dev);
2586
2587	np->tx_pkts_in_progress--;
2588	if (np->tx_change_owner) {
2589		np->tx_change_owner->first_tx_desc->flaglen |=
2590			cpu_to_le32(NV_TX2_VALID);
2591		np->tx_pkts_in_progress++;
2592
2593		np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2594		if (np->tx_change_owner == np->tx_end_flip)
2595			np->tx_change_owner = NULL;
2596
2597		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2598	}
2599}
2600
2601/*
2602 * nv_tx_done: check for completed packets, release the skbs.
2603 *
2604 * Caller must own np->lock.
2605 */
2606static int nv_tx_done(struct net_device *dev, int limit)
2607{
2608	struct fe_priv *np = netdev_priv(dev);
2609	u32 flags;
2610	int tx_work = 0;
2611	struct ring_desc *orig_get_tx = np->get_tx.orig;
2612	unsigned int bytes_compl = 0;
2613
2614	while ((np->get_tx.orig != np->put_tx.orig) &&
2615	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2616	       (tx_work < limit)) {
2617
2618		nv_unmap_txskb(np, np->get_tx_ctx);
2619
2620		if (np->desc_ver == DESC_VER_1) {
2621			if (flags & NV_TX_LASTPACKET) {
2622				if (unlikely(flags & NV_TX_ERROR)) {
2623					if ((flags & NV_TX_RETRYERROR)
2624					    && !(flags & NV_TX_RETRYCOUNT_MASK))
2625						nv_legacybackoff_reseed(dev);
2626				} else {
2627					unsigned int len;
2628
2629					u64_stats_update_begin(&np->swstats_tx_syncp);
2630					nv_txrx_stats_inc(stat_tx_packets);
2631					len = np->get_tx_ctx->skb->len;
2632					nv_txrx_stats_add(stat_tx_bytes, len);
2633					u64_stats_update_end(&np->swstats_tx_syncp);
2634				}
2635				bytes_compl += np->get_tx_ctx->skb->len;
2636				dev_kfree_skb_any(np->get_tx_ctx->skb);
2637				np->get_tx_ctx->skb = NULL;
2638				tx_work++;
2639			}
2640		} else {
2641			if (flags & NV_TX2_LASTPACKET) {
2642				if (unlikely(flags & NV_TX2_ERROR)) {
2643					if ((flags & NV_TX2_RETRYERROR)
2644					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
2645						nv_legacybackoff_reseed(dev);
2646				} else {
2647					unsigned int len;
2648
2649					u64_stats_update_begin(&np->swstats_tx_syncp);
2650					nv_txrx_stats_inc(stat_tx_packets);
2651					len = np->get_tx_ctx->skb->len;
2652					nv_txrx_stats_add(stat_tx_bytes, len);
2653					u64_stats_update_end(&np->swstats_tx_syncp);
2654				}
2655				bytes_compl += np->get_tx_ctx->skb->len;
2656				dev_kfree_skb_any(np->get_tx_ctx->skb);
2657				np->get_tx_ctx->skb = NULL;
2658				tx_work++;
2659			}
2660		}
2661		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2662			np->get_tx.orig = np->tx_ring.orig;
2663		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2664			np->get_tx_ctx = np->tx_skb;
2665	}
2666
2667	netdev_completed_queue(np->dev, tx_work, bytes_compl);
2668
2669	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2670		np->tx_stop = 0;
2671		netif_wake_queue(dev);
2672	}
2673	return tx_work;
2674}
2675
2676static int nv_tx_done_optimized(struct net_device *dev, int limit)
2677{
2678	struct fe_priv *np = netdev_priv(dev);
2679	u32 flags;
2680	int tx_work = 0;
2681	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2682	unsigned long bytes_cleaned = 0;
2683
2684	while ((np->get_tx.ex != np->put_tx.ex) &&
2685	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2686	       (tx_work < limit)) {
2687
2688		nv_unmap_txskb(np, np->get_tx_ctx);
2689
2690		if (flags & NV_TX2_LASTPACKET) {
2691			if (unlikely(flags & NV_TX2_ERROR)) {
2692				if ((flags & NV_TX2_RETRYERROR)
2693				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2694					if (np->driver_data & DEV_HAS_GEAR_MODE)
2695						nv_gear_backoff_reseed(dev);
2696					else
2697						nv_legacybackoff_reseed(dev);
2698				}
2699			} else {
2700				unsigned int len;
2701
2702				u64_stats_update_begin(&np->swstats_tx_syncp);
2703				nv_txrx_stats_inc(stat_tx_packets);
2704				len = np->get_tx_ctx->skb->len;
2705				nv_txrx_stats_add(stat_tx_bytes, len);
2706				u64_stats_update_end(&np->swstats_tx_syncp);
2707			}
2708
2709			bytes_cleaned += np->get_tx_ctx->skb->len;
2710			dev_kfree_skb_any(np->get_tx_ctx->skb);
2711			np->get_tx_ctx->skb = NULL;
2712			tx_work++;
2713
2714			if (np->tx_limit)
2715				nv_tx_flip_ownership(dev);
2716		}
2717
2718		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2719			np->get_tx.ex = np->tx_ring.ex;
2720		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2721			np->get_tx_ctx = np->tx_skb;
2722	}
2723
2724	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
2725
2726	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2727		np->tx_stop = 0;
2728		netif_wake_queue(dev);
2729	}
2730	return tx_work;
2731}
2732
2733/*
2734 * nv_tx_timeout: dev->tx_timeout function
2735 * Called with netif_tx_lock held.
2736 */
2737static void nv_tx_timeout(struct net_device *dev, unsigned int txqueue)
2738{
2739	struct fe_priv *np = netdev_priv(dev);
2740	u8 __iomem *base = get_hwbase(dev);
2741	u32 status;
2742	union ring_type put_tx;
2743	int saved_tx_limit;
2744
2745	if (np->msi_flags & NV_MSI_X_ENABLED)
2746		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2747	else
2748		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2749
2750	netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
2751
2752	if (unlikely(debug_tx_timeout)) {
2753		int i;
2754
2755		netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2756		netdev_info(dev, "Dumping tx registers\n");
2757		for (i = 0; i <= np->register_size; i += 32) {
2758			netdev_info(dev,
2759				    "%3x: %08x %08x %08x %08x "
2760				    "%08x %08x %08x %08x\n",
2761				    i,
2762				    readl(base + i + 0), readl(base + i + 4),
2763				    readl(base + i + 8), readl(base + i + 12),
2764				    readl(base + i + 16), readl(base + i + 20),
2765				    readl(base + i + 24), readl(base + i + 28));
2766		}
2767		netdev_info(dev, "Dumping tx ring\n");
2768		for (i = 0; i < np->tx_ring_size; i += 4) {
2769			if (!nv_optimized(np)) {
2770				netdev_info(dev,
2771					    "%03x: %08x %08x // %08x %08x "
2772					    "// %08x %08x // %08x %08x\n",
2773					    i,
2774					    le32_to_cpu(np->tx_ring.orig[i].buf),
2775					    le32_to_cpu(np->tx_ring.orig[i].flaglen),
2776					    le32_to_cpu(np->tx_ring.orig[i+1].buf),
2777					    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2778					    le32_to_cpu(np->tx_ring.orig[i+2].buf),
2779					    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2780					    le32_to_cpu(np->tx_ring.orig[i+3].buf),
2781					    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2782			} else {
2783				netdev_info(dev,
2784					    "%03x: %08x %08x %08x "
2785					    "// %08x %08x %08x "
2786					    "// %08x %08x %08x "
2787					    "// %08x %08x %08x\n",
2788					    i,
2789					    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2790					    le32_to_cpu(np->tx_ring.ex[i].buflow),
2791					    le32_to_cpu(np->tx_ring.ex[i].flaglen),
2792					    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2793					    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2794					    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2795					    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2796					    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2797					    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2798					    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2799					    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2800					    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2801			}
2802		}
2803	}
2804
2805	spin_lock_irq(&np->lock);
2806
2807	/* 1) stop tx engine */
2808	nv_stop_tx(dev);
2809
2810	/* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2811	saved_tx_limit = np->tx_limit;
2812	np->tx_limit = 0; /* prevent giving HW any limited pkts */
2813	np->tx_stop = 0;  /* prevent waking tx queue */
2814	if (!nv_optimized(np))
2815		nv_tx_done(dev, np->tx_ring_size);
2816	else
2817		nv_tx_done_optimized(dev, np->tx_ring_size);
2818
2819	/* save current HW position */
2820	if (np->tx_change_owner)
2821		put_tx.ex = np->tx_change_owner->first_tx_desc;
2822	else
2823		put_tx = np->put_tx;
2824
2825	/* 3) clear all tx state */
2826	nv_drain_tx(dev);
2827	nv_init_tx(dev);
2828
2829	/* 4) restore state to current HW position */
2830	np->get_tx = np->put_tx = put_tx;
2831	np->tx_limit = saved_tx_limit;
2832
2833	/* 5) restart tx engine */
2834	nv_start_tx(dev);
2835	netif_wake_queue(dev);
2836	spin_unlock_irq(&np->lock);
2837}
2838
2839/*
2840 * Called when the nic notices a mismatch between the actual data len on the
2841 * wire and the len indicated in the 802 header
2842 */
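/*
 * Example: if the ethertype field holds 50 (a length, being less than
 * ETH_DATA_LEN), the expected frame length is 50 + ETH_HLEN = 64 bytes.
 * A 100-byte frame is then trimmed to 64 bytes, while a 55-byte frame
 * is shorter than claimed and is discarded (-1).
 */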
2843static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2844{
2845	int hdrlen;	/* length of the 802 header */
2846	int protolen;	/* length as stored in the proto field */
2847
2848	/* 1) calculate len according to header */
2849	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2850		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2851		hdrlen = VLAN_HLEN;
2852	} else {
2853		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2854		hdrlen = ETH_HLEN;
2855	}
2856	if (protolen > ETH_DATA_LEN)
2857		return datalen; /* Value in proto field not a len, no checks possible */
2858
2859	protolen += hdrlen;
2860	/* consistency checks: */
2861	if (datalen > ETH_ZLEN) {
2862		if (datalen >= protolen) {
2863			/* more data on wire than in 802 header, trim off
2864			 * the additional data.
2865			 */
2866			return protolen;
2867		} else {
2868			/* less data on wire than mentioned in header.
2869			 * Discard the packet.
2870			 */
2871			return -1;
2872		}
2873	} else {
2874		/* short packet. Accept only if 802 values are also short */
2875		if (protolen > ETH_ZLEN) {
2876			return -1;
2877		}
2878		return datalen;
2879	}
2880}
2881
2882static void rx_missing_handler(u32 flags, struct fe_priv *np)
2883{
2884	if (flags & NV_RX_MISSEDFRAME) {
2885		u64_stats_update_begin(&np->swstats_rx_syncp);
2886		nv_txrx_stats_inc(stat_rx_missed_errors);
2887		u64_stats_update_end(&np->swstats_rx_syncp);
2888	}
2889}
2890
2891static int nv_rx_process(struct net_device *dev, int limit)
2892{
2893	struct fe_priv *np = netdev_priv(dev);
2894	u32 flags;
2895	int rx_work = 0;
2896	struct sk_buff *skb;
2897	int len;
2898
2899	while ((np->get_rx.orig != np->put_rx.orig) &&
2900	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2901		(rx_work < limit)) {
2902
2903		/*
2904		 * the packet is for us - immediately tear down the pci mapping.
2905		 * TODO: check if a prefetch of the first cacheline improves
2906		 * the performance.
2907		 */
2908		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
2909				 np->get_rx_ctx->dma_len,
2910				 DMA_FROM_DEVICE);
2911		skb = np->get_rx_ctx->skb;
2912		np->get_rx_ctx->skb = NULL;
2913
2914		/* look at what we actually got: */
2915		if (np->desc_ver == DESC_VER_1) {
2916			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2917				len = flags & LEN_MASK_V1;
2918				if (unlikely(flags & NV_RX_ERROR)) {
2919					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2920						len = nv_getlen(dev, skb->data, len);
2921						if (len < 0) {
2922							dev_kfree_skb(skb);
2923							goto next_pkt;
2924						}
2925					}
2926					/* framing errors are soft errors */
2927					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2928						if (flags & NV_RX_SUBTRACT1)
2929							len--;
2930					}
2931					/* the rest are hard errors */
2932					else {
2933						rx_missing_handler(flags, np);
2934						dev_kfree_skb(skb);
2935						goto next_pkt;
2936					}
2937				}
2938			} else {
2939				dev_kfree_skb(skb);
2940				goto next_pkt;
2941			}
2942		} else {
2943			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2944				len = flags & LEN_MASK_V2;
2945				if (unlikely(flags & NV_RX2_ERROR)) {
2946					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2947						len = nv_getlen(dev, skb->data, len);
2948						if (len < 0) {
2949							dev_kfree_skb(skb);
2950							goto next_pkt;
2951						}
2952					}
2953					/* framing errors are soft errors */
2954					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2955						if (flags & NV_RX2_SUBTRACT1)
2956							len--;
2957					}
2958					/* the rest are hard errors */
2959					else {
2960						dev_kfree_skb(skb);
2961						goto next_pkt;
2962					}
2963				}
2964				if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2965				    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
2966					skb->ip_summed = CHECKSUM_UNNECESSARY;
2967			} else {
2968				dev_kfree_skb(skb);
2969				goto next_pkt;
2970			}
2971		}
2972		/* got a valid packet - forward it to the network core */
2973		skb_put(skb, len);
2974		skb->protocol = eth_type_trans(skb, dev);
2975		napi_gro_receive(&np->napi, skb);
2976		u64_stats_update_begin(&np->swstats_rx_syncp);
2977		nv_txrx_stats_inc(stat_rx_packets);
2978		nv_txrx_stats_add(stat_rx_bytes, len);
2979		u64_stats_update_end(&np->swstats_rx_syncp);
2980next_pkt:
2981		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2982			np->get_rx.orig = np->rx_ring.orig;
2983		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2984			np->get_rx_ctx = np->rx_skb;
2985
2986		rx_work++;
2987	}
2988
2989	return rx_work;
2990}
2991
2992static int nv_rx_process_optimized(struct net_device *dev, int limit)
2993{
2994	struct fe_priv *np = netdev_priv(dev);
2995	u32 flags;
2996	u32 vlanflags = 0;
2997	int rx_work = 0;
2998	struct sk_buff *skb;
2999	int len;
3000
3001	while ((np->get_rx.ex != np->put_rx.ex) &&
3002	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
3003	      (rx_work < limit)) {
3004
3005		/*
3006		 * the packet is for us - immediately tear down the pci mapping.
3007		 * TODO: check if a prefetch of the first cacheline improves
3008		 * the performance.
3009		 */
3010		dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma,
3011				 np->get_rx_ctx->dma_len,
3012				 DMA_FROM_DEVICE);
3013		skb = np->get_rx_ctx->skb;
3014		np->get_rx_ctx->skb = NULL;
3015
3016		/* look at what we actually got: */
3017		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
3018			len = flags & LEN_MASK_V2;
3019			if (unlikely(flags & NV_RX2_ERROR)) {
3020				if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
3021					len = nv_getlen(dev, skb->data, len);
3022					if (len < 0) {
3023						dev_kfree_skb(skb);
3024						goto next_pkt;
3025					}
3026				}
3027				/* framing errors are soft errors */
3028				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
3029					if (flags & NV_RX2_SUBTRACT1)
3030						len--;
3031				}
3032				/* the rest are hard errors */
3033				else {
3034					dev_kfree_skb(skb);
3035					goto next_pkt;
3036				}
3037			}
3038
3039			if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
3040			    ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP))   /*ip and udp */
3041				skb->ip_summed = CHECKSUM_UNNECESSARY;
3042
3043			/* got a valid packet - forward it to the network core */
3044			skb_put(skb, len);
3045			skb->protocol = eth_type_trans(skb, dev);
3046			prefetch(skb->data);
3047
3048			vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
3049
3050			/*
3051			 * There is a need to check for NETIF_F_HW_VLAN_CTAG_RX
3052			 * here: even if vlan rx accel is disabled,
3053			 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
3054			 */
3055			if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
3056			    vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
3057				u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
3058
3059				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3060			}
3061			napi_gro_receive(&np->napi, skb);
3062			u64_stats_update_begin(&np->swstats_rx_syncp);
3063			nv_txrx_stats_inc(stat_rx_packets);
3064			nv_txrx_stats_add(stat_rx_bytes, len);
3065			u64_stats_update_end(&np->swstats_rx_syncp);
3066		} else {
3067			dev_kfree_skb(skb);
3068		}
3069next_pkt:
3070		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
3071			np->get_rx.ex = np->rx_ring.ex;
3072		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
3073			np->get_rx_ctx = np->rx_skb;
3074
3075		rx_work++;
3076	}
3077
3078	return rx_work;
3079}
3080
3081static void set_bufsize(struct net_device *dev)
3082{
3083	struct fe_priv *np = netdev_priv(dev);
3084
3085	if (dev->mtu <= ETH_DATA_LEN)
3086		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
3087	else
3088		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
3089}
3090
3091/*
3092 * nv_change_mtu: dev->change_mtu function
3093 * Called with dev_base_lock held for read.
3094 */
3095static int nv_change_mtu(struct net_device *dev, int new_mtu)
3096{
3097	struct fe_priv *np = netdev_priv(dev);
3098	int old_mtu;
3099
3100	old_mtu = dev->mtu;
3101	dev->mtu = new_mtu;
3102
3103	/* return early if the buffer sizes will not change */
3104	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
3105		return 0;
3106
3107	/* synchronized against open : rtnl_lock() held by caller */
3108	if (netif_running(dev)) {
3109		u8 __iomem *base = get_hwbase(dev);
3110		/*
3111		 * It seems that the nic preloads valid ring entries into an
3112		 * internal buffer. The procedure for flushing everything is
3113		 * guessed, there is probably a simpler approach.
3114		 * Changing the MTU is a rare event, it shouldn't matter.
3115		 */
3116		nv_disable_irq(dev);
3117		nv_napi_disable(dev);
3118		netif_tx_lock_bh(dev);
3119		netif_addr_lock(dev);
3120		spin_lock(&np->lock);
3121		/* stop engines */
3122		nv_stop_rxtx(dev);
3123		nv_txrx_reset(dev);
3124		/* drain rx queue */
3125		nv_drain_rxtx(dev);
3126		/* reinit driver view of the rx queue */
3127		set_bufsize(dev);
3128		if (nv_init_ring(dev)) {
3129			if (!np->in_shutdown)
3130				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3131		}
3132		/* reinit nic view of the rx queue */
3133		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3134		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3135		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3136			base + NvRegRingSizes);
3137		pci_push(base);
3138		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3139		pci_push(base);
3140
3141		/* restart rx engine */
3142		nv_start_rxtx(dev);
3143		spin_unlock(&np->lock);
3144		netif_addr_unlock(dev);
3145		netif_tx_unlock_bh(dev);
3146		nv_napi_enable(dev);
3147		nv_enable_irq(dev);
3148	}
3149	return 0;
3150}
3151
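/* Pack the 6-byte MAC address little-endian into the two hw registers:
 * bytes 0-3 into NvRegMacAddrA, bytes 4-5 into NvRegMacAddrB.
 */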
3152static void nv_copy_mac_to_hw(struct net_device *dev)
3153{
3154	u8 __iomem *base = get_hwbase(dev);
3155	u32 mac[2];
3156
3157	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
3158			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
3159	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
3160
3161	writel(mac[0], base + NvRegMacAddrA);
3162	writel(mac[1], base + NvRegMacAddrB);
3163}
3164
3165/*
3166 * nv_set_mac_address: dev->set_mac_address function
3167 * Called with rtnl_lock() held.
3168 */
3169static int nv_set_mac_address(struct net_device *dev, void *addr)
3170{
3171	struct fe_priv *np = netdev_priv(dev);
3172	struct sockaddr *macaddr = (struct sockaddr *)addr;
3173
3174	if (!is_valid_ether_addr(macaddr->sa_data))
3175		return -EADDRNOTAVAIL;
3176
3177	/* synchronized against open : rtnl_lock() held by caller */
3178	eth_hw_addr_set(dev, macaddr->sa_data);
3179
3180	if (netif_running(dev)) {
3181		netif_tx_lock_bh(dev);
3182		netif_addr_lock(dev);
3183		spin_lock_irq(&np->lock);
3184
3185		/* stop rx engine */
3186		nv_stop_rx(dev);
3187
3188		/* set mac address */
3189		nv_copy_mac_to_hw(dev);
3190
3191		/* restart rx engine */
3192		nv_start_rx(dev);
3193		spin_unlock_irq(&np->lock);
3194		netif_addr_unlock(dev);
3195		netif_tx_unlock_bh(dev);
3196	} else {
3197		nv_copy_mac_to_hw(dev);
3198	}
3199	return 0;
3200}
3201
3202/*
3203 * nv_set_multicast: dev->set_multicast function
3204 * Called with netif_tx_lock held.
3205 */
3206static void nv_set_multicast(struct net_device *dev)
3207{
3208	struct fe_priv *np = netdev_priv(dev);
3209	u8 __iomem *base = get_hwbase(dev);
3210	u32 addr[2];
3211	u32 mask[2];
3212	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3213
3214	memset(addr, 0, sizeof(addr));
3215	memset(mask, 0, sizeof(mask));
3216
3217	if (dev->flags & IFF_PROMISC) {
3218		pff |= NVREG_PFF_PROMISC;
3219	} else {
3220		pff |= NVREG_PFF_MYADDR;
3221
3222		if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3223			u32 alwaysOff[2];
3224			u32 alwaysOn[2];
3225
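			/* Accumulate the bits that are set in every multicast
			 * address (alwaysOn) and clear in every address
			 * (alwaysOff); the hw filter then matches addr =
			 * alwaysOn under a mask of the bits that never vary.
			 */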
3226			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3227			if (dev->flags & IFF_ALLMULTI) {
3228				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3229			} else {
3230				struct netdev_hw_addr *ha;
3231
3232				netdev_for_each_mc_addr(ha, dev) {
3233					unsigned char *hw_addr = ha->addr;
3234					u32 a, b;
3235
3236					a = le32_to_cpu(*(__le32 *) hw_addr);
3237					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
3238					alwaysOn[0] &= a;
3239					alwaysOff[0] &= ~a;
3240					alwaysOn[1] &= b;
3241					alwaysOff[1] &= ~b;
3242				}
3243			}
3244			addr[0] = alwaysOn[0];
3245			addr[1] = alwaysOn[1];
3246			mask[0] = alwaysOn[0] | alwaysOff[0];
3247			mask[1] = alwaysOn[1] | alwaysOff[1];
3248		} else {
3249			mask[0] = NVREG_MCASTMASKA_NONE;
3250			mask[1] = NVREG_MCASTMASKB_NONE;
3251		}
3252	}
3253	addr[0] |= NVREG_MCASTADDRA_FORCE;
3254	pff |= NVREG_PFF_ALWAYS;
3255	spin_lock_irq(&np->lock);
3256	nv_stop_rx(dev);
3257	writel(addr[0], base + NvRegMulticastAddrA);
3258	writel(addr[1], base + NvRegMulticastAddrB);
3259	writel(mask[0], base + NvRegMulticastMaskA);
3260	writel(mask[1], base + NvRegMulticastMaskB);
3261	writel(pff, base + NvRegPacketFilterFlags);
3262	nv_start_rx(dev);
3263	spin_unlock_irq(&np->lock);
3264}
3265
3266static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3267{
3268	struct fe_priv *np = netdev_priv(dev);
3269	u8 __iomem *base = get_hwbase(dev);
3270
3271	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3272
3273	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3274		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3275		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3276			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3277			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3278		} else {
3279			writel(pff, base + NvRegPacketFilterFlags);
3280		}
3281	}
3282	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3283		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3284		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3285			u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3286			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3287				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3288			if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3289				pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3290				/* limit the number of tx pause frames to a default of 8 */
3291				writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3292			}
3293			writel(pause_enable,  base + NvRegTxPauseFrame);
3294			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3295			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3296		} else {
3297			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
3298			writel(regmisc, base + NvRegMisc1);
3299		}
3300	}
3301}
3302
3303static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3304{
3305	struct fe_priv *np = netdev_priv(dev);
3306	u8 __iomem *base = get_hwbase(dev);
3307	u32 phyreg, txreg;
3308	int mii_status;
3309
3310	np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3311	np->duplex = duplex;
3312
3313	/* see if gigabit phy */
3314	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3315	if (mii_status & PHY_GIGABIT) {
3316		np->gigabit = PHY_GIGABIT;
3317		phyreg = readl(base + NvRegSlotTime);
3318		phyreg &= ~(0x3FF00);
3319		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3320			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3321		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3322			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3323		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3324			phyreg |= NVREG_SLOTTIME_1000_FULL;
3325		writel(phyreg, base + NvRegSlotTime);
3326	}
3327
3328	phyreg = readl(base + NvRegPhyInterface);
3329	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3330	if (np->duplex == 0)
3331		phyreg |= PHY_HALF;
3332	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3333		phyreg |= PHY_100;
3334	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3335							NVREG_LINKSPEED_1000)
3336		phyreg |= PHY_1000;
3337	writel(phyreg, base + NvRegPhyInterface);
3338
3339	if (phyreg & PHY_RGMII) {
3340		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3341							NVREG_LINKSPEED_1000)
3342			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3343		else
3344			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3345	} else {
3346		txreg = NVREG_TX_DEFERRAL_DEFAULT;
3347	}
3348	writel(txreg, base + NvRegTxDeferral);
3349
3350	if (np->desc_ver == DESC_VER_1) {
3351		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3352	} else {
3353		if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3354					 NVREG_LINKSPEED_1000)
3355			txreg = NVREG_TX_WM_DESC2_3_1000;
3356		else
3357			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3358	}
3359	writel(txreg, base + NvRegTxWatermark);
3360
3361	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3362			base + NvRegMisc1);
3363	pci_push(base);
3364	writel(np->linkspeed, base + NvRegLinkSpeed);
3365	pci_push(base);
3366}
3367
3368/**
3369 * nv_update_linkspeed - Setup the MAC according to the link partner
3370 * @dev: Network device to be configured
3371 *
3372 * The function queries the PHY and checks if there is a link partner.
3373 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3374 * set to 10 MBit HD.
3375 *
3376 * The function returns 0 if there is no link partner and 1 if there is
3377 * a good link partner.
3378 */
3379static int nv_update_linkspeed(struct net_device *dev)
3380{
3381	struct fe_priv *np = netdev_priv(dev);
3382	u8 __iomem *base = get_hwbase(dev);
3383	int adv = 0;
3384	int lpa = 0;
3385	int adv_lpa, adv_pause, lpa_pause;
3386	int newls = np->linkspeed;
3387	int newdup = np->duplex;
3388	int mii_status;
3389	u32 bmcr;
3390	int retval = 0;
3391	u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3392	u32 txrxFlags = 0;
3393	u32 phy_exp;
3394
3395	/* If device loopback is enabled, set carrier on and enable max link
3396	 * speed.
3397	 */
3398	bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3399	if (bmcr & BMCR_LOOPBACK) {
3400		if (netif_running(dev)) {
3401			nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3402			if (!netif_carrier_ok(dev))
3403				netif_carrier_on(dev);
3404		}
3405		return 1;
3406	}
3407
3408	/* BMSR_LSTATUS is latched, read it twice:
3409	 * we want the current value.
3410	 */
3411	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3412	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3413
3414	if (!(mii_status & BMSR_LSTATUS)) {
3415		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3416		newdup = 0;
3417		retval = 0;
3418		goto set_speed;
3419	}
3420
3421	if (np->autoneg == 0) {
3422		if (np->fixed_mode & LPA_100FULL) {
3423			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3424			newdup = 1;
3425		} else if (np->fixed_mode & LPA_100HALF) {
3426			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3427			newdup = 0;
3428		} else if (np->fixed_mode & LPA_10FULL) {
3429			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3430			newdup = 1;
3431		} else {
3432			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3433			newdup = 0;
3434		}
3435		retval = 1;
3436		goto set_speed;
3437	}
3438	/* check auto negotiation is complete */
3439	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3440		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3441		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3442		newdup = 0;
3443		retval = 0;
3444		goto set_speed;
3445	}
3446
3447	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3448	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3449
3450	retval = 1;
3451	if (np->gigabit == PHY_GIGABIT) {
3452		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3453		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3454
3455		if ((control_1000 & ADVERTISE_1000FULL) &&
3456			(status_1000 & LPA_1000FULL)) {
3457			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3458			newdup = 1;
3459			goto set_speed;
3460		}
3461	}
3462
3463	/* FIXME: handle parallel detection properly */
3464	adv_lpa = lpa & adv;
3465	if (adv_lpa & LPA_100FULL) {
3466		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3467		newdup = 1;
3468	} else if (adv_lpa & LPA_100HALF) {
3469		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3470		newdup = 0;
3471	} else if (adv_lpa & LPA_10FULL) {
3472		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3473		newdup = 1;
3474	} else if (adv_lpa & LPA_10HALF) {
3475		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3476		newdup = 0;
3477	} else {
3478		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3479		newdup = 0;
3480	}
3481
3482set_speed:
3483	if (np->duplex == newdup && np->linkspeed == newls)
3484		return retval;
3485
3486	np->duplex = newdup;
3487	np->linkspeed = newls;
3488
3489	/* The transmitter and receiver must be restarted for safe update */
3490	if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3491		txrxFlags |= NV_RESTART_TX;
3492		nv_stop_tx(dev);
3493	}
3494	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3495		txrxFlags |= NV_RESTART_RX;
3496		nv_stop_rx(dev);
3497	}
3498
3499	if (np->gigabit == PHY_GIGABIT) {
3500		phyreg = readl(base + NvRegSlotTime);
3501		phyreg &= ~(0x3FF00);
3502		if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3503		    ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3504			phyreg |= NVREG_SLOTTIME_10_100_FULL;
3505		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3506			phyreg |= NVREG_SLOTTIME_1000_FULL;
3507		writel(phyreg, base + NvRegSlotTime);
3508	}
3509
3510	phyreg = readl(base + NvRegPhyInterface);
3511	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3512	if (np->duplex == 0)
3513		phyreg |= PHY_HALF;
3514	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3515		phyreg |= PHY_100;
3516	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3517		phyreg |= PHY_1000;
3518	writel(phyreg, base + NvRegPhyInterface);
3519
3520	phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3521	if (phyreg & PHY_RGMII) {
3522		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3523			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3524		} else {
3525			if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3526				if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3527					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3528				else
3529					txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3530			} else {
3531				txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3532			}
3533		}
3534	} else {
3535		if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3536			txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3537		else
3538			txreg = NVREG_TX_DEFERRAL_DEFAULT;
3539	}
3540	writel(txreg, base + NvRegTxDeferral);
3541
3542	if (np->desc_ver == DESC_VER_1) {
3543		txreg = NVREG_TX_WM_DESC1_DEFAULT;
3544	} else {
3545		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3546			txreg = NVREG_TX_WM_DESC2_3_1000;
3547		else
3548			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3549	}
3550	writel(txreg, base + NvRegTxWatermark);
3551
3552	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3553		base + NvRegMisc1);
3554	pci_push(base);
3555	writel(np->linkspeed, base + NvRegLinkSpeed);
3556	pci_push(base);
3557
3558	pause_flags = 0;
3559	/* setup pause frame */
3560	if (netif_running(dev) && (np->duplex != 0)) {
3561		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3562			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3563			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3564
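			/* Resolve flow control from the advertised pause
			 * bits, following the standard autoneg pause
			 * resolution: symmetric pause when both ends
			 * advertise it, asymmetric (tx- or rx-only) pause
			 * when the asym bits pair up accordingly.
			 */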
3565			switch (adv_pause) {
3566			case ADVERTISE_PAUSE_CAP:
3567				if (lpa_pause & LPA_PAUSE_CAP) {
3568					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3569					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3570						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3571				}
3572				break;
3573			case ADVERTISE_PAUSE_ASYM:
3574				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3575					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3576				break;
3577			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3578				if (lpa_pause & LPA_PAUSE_CAP) {
3579					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
3580					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3581						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3582				}
3583				if (lpa_pause == LPA_PAUSE_ASYM)
3584					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3585				break;
3586			}
3587		} else {
3588			pause_flags = np->pause_flags;
3589		}
3590	}
3591	nv_update_pause(dev, pause_flags);
3592
3593	if (txrxFlags & NV_RESTART_TX)
3594		nv_start_tx(dev);
3595	if (txrxFlags & NV_RESTART_RX)
3596		nv_start_rx(dev);
3597
3598	return retval;
3599}
3600
3601static void nv_linkchange(struct net_device *dev)
3602{
3603	if (nv_update_linkspeed(dev)) {
3604		if (!netif_carrier_ok(dev)) {
3605			netif_carrier_on(dev);
3606			netdev_info(dev, "link up\n");
3607			nv_txrx_gate(dev, false);
3608			nv_start_rx(dev);
3609		}
3610	} else {
3611		if (netif_carrier_ok(dev)) {
3612			netif_carrier_off(dev);
3613			netdev_info(dev, "link down\n");
3614			nv_txrx_gate(dev, true);
3615			nv_stop_rx(dev);
3616		}
3617	}
3618}
3619
3620static void nv_link_irq(struct net_device *dev)
3621{
3622	u8 __iomem *base = get_hwbase(dev);
3623	u32 miistat;
3624
3625	miistat = readl(base + NvRegMIIStatus);
3626	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3627
3628	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3629		nv_linkchange(dev);
3630}
3631
3632static void nv_msi_workaround(struct fe_priv *np)
3633{
3635	/* Need to toggle the msi irq mask within the ethernet device;
3636	 * otherwise future interrupts will not be detected.
3637	 */
3638	if (np->msi_flags & NV_MSI_ENABLED) {
3639		u8 __iomem *base = np->base;
3640
3641		writel(0, base + NvRegMSIIrqMask);
3642		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3643	}
3644}
3645
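/* Dynamic interrupt moderation: after a heavy poll (more than
 * NV_DYNAMIC_THRESHOLD units of work) switch np->irqmask to the
 * poll-based CPU mask; after NV_DYNAMIC_MAX_QUIET_COUNT quiet polls
 * switch back to per-packet (throughput) interrupts. Returns 1 when
 * np->irqmask changed; the new mask takes effect the next time it is
 * written to NvRegIrqMask.
 */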
3646static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3647{
3648	struct fe_priv *np = netdev_priv(dev);
3649
3650	if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3651		if (total_work > NV_DYNAMIC_THRESHOLD) {
3652			/* transition to poll based interrupts */
3653			np->quiet_count = 0;
3654			if (np->irqmask != NVREG_IRQMASK_CPU) {
3655				np->irqmask = NVREG_IRQMASK_CPU;
3656				return 1;
3657			}
3658		} else {
3659			if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3660				np->quiet_count++;
3661			} else {
3662				/* reached a period of low activity, switch
3663				 * to per tx/rx packet interrupts */
3664				if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3665					np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3666					return 1;
3667				}
3668			}
3669		}
3670	}
3671	return 0;
3672}
3673
3674static irqreturn_t nv_nic_irq(int foo, void *data)
3675{
3676	struct net_device *dev = (struct net_device *) data;
3677	struct fe_priv *np = netdev_priv(dev);
3678	u8 __iomem *base = get_hwbase(dev);
3679
3680	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3681		np->events = readl(base + NvRegIrqStatus);
3682		writel(np->events, base + NvRegIrqStatus);
3683	} else {
3684		np->events = readl(base + NvRegMSIXIrqStatus);
3685		writel(np->events, base + NvRegMSIXIrqStatus);
3686	}
3687	if (!(np->events & np->irqmask))
3688		return IRQ_NONE;
3689
3690	nv_msi_workaround(np);
3691
3692	if (napi_schedule_prep(&np->napi)) {
3693		/*
3694		 * Disable further irqs (msix not enabled with napi)
3695		 */
3696		writel(0, base + NvRegIrqMask);
3697		__napi_schedule(&np->napi);
3698	}
3699
3700	return IRQ_HANDLED;
3701}
3702
3703	/* All _optimized functions are used to help increase performance
3704	 * (reduce CPU and increase throughput). They use descriptor version 3,
3705	 * compiler directives, and reduced memory accesses.
3706	 */
3707static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3708{
3709	struct net_device *dev = (struct net_device *) data;
3710	struct fe_priv *np = netdev_priv(dev);
3711	u8 __iomem *base = get_hwbase(dev);
3712
3713	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3714		np->events = readl(base + NvRegIrqStatus);
3715		writel(np->events, base + NvRegIrqStatus);
3716	} else {
3717		np->events = readl(base + NvRegMSIXIrqStatus);
3718		writel(np->events, base + NvRegMSIXIrqStatus);
3719	}
3720	if (!(np->events & np->irqmask))
3721		return IRQ_NONE;
3722
3723	nv_msi_workaround(np);
3724
3725	if (napi_schedule_prep(&np->napi)) {
3726		/*
3727		 * Disable further irqs (msix not enabled with napi)
3728		 */
3729		writel(0, base + NvRegIrqMask);
3730		__napi_schedule(&np->napi);
3731	}
3732
3733	return IRQ_HANDLED;
3734}
3735
3736static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3737{
3738	struct net_device *dev = (struct net_device *) data;
3739	struct fe_priv *np = netdev_priv(dev);
3740	u8 __iomem *base = get_hwbase(dev);
3741	u32 events;
3742	int i;
3743	unsigned long flags;
3744
3745	for (i = 0;; i++) {
3746		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3747		writel(events, base + NvRegMSIXIrqStatus);
3748		netdev_dbg(dev, "tx irq events: %08x\n", events);
3749		if (!(events & np->irqmask))
3750			break;
3751
3752		spin_lock_irqsave(&np->lock, flags);
3753		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3754		spin_unlock_irqrestore(&np->lock, flags);
3755
3756		if (unlikely(i > max_interrupt_work)) {
3757			spin_lock_irqsave(&np->lock, flags);
3758			/* disable interrupts on the nic */
3759			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3760			pci_push(base);
3761
3762			if (!np->in_shutdown) {
3763				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3764				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3765			}
3766			spin_unlock_irqrestore(&np->lock, flags);
3767			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3768				   __func__, i);
3769			break;
3770		}
3771
3772	}
3773
3774	return IRQ_RETVAL(i);
3775}
3776
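/* NAPI poll: reap tx completions, process up to @budget rx packets and
 * refill the rx ring as we go. If refilling fails, the oom_kick timer
 * retries later. Interrupts are re-enabled only when less than the
 * full budget was consumed.
 */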
3777static int nv_napi_poll(struct napi_struct *napi, int budget)
3778{
3779	struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3780	struct net_device *dev = np->dev;
3781	u8 __iomem *base = get_hwbase(dev);
3782	unsigned long flags;
3783	int retcode;
3784	int rx_count, tx_work = 0, rx_work = 0;
3785
3786	do {
3787		if (!nv_optimized(np)) {
3788			spin_lock_irqsave(&np->lock, flags);
3789			tx_work += nv_tx_done(dev, np->tx_ring_size);
3790			spin_unlock_irqrestore(&np->lock, flags);
3791
3792			rx_count = nv_rx_process(dev, budget - rx_work);
3793			retcode = nv_alloc_rx(dev);
3794		} else {
3795			spin_lock_irqsave(&np->lock, flags);
3796			tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3797			spin_unlock_irqrestore(&np->lock, flags);
3798
3799			rx_count = nv_rx_process_optimized(dev,
3800			    budget - rx_work);
3801			retcode = nv_alloc_rx_optimized(dev);
3802		}
3803	} while (retcode == 0 &&
3804		 rx_count > 0 && (rx_work += rx_count) < budget);
3805
3806	if (retcode) {
3807		spin_lock_irqsave(&np->lock, flags);
3808		if (!np->in_shutdown)
3809			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3810		spin_unlock_irqrestore(&np->lock, flags);
3811	}
3812
3813	nv_change_interrupt_mode(dev, tx_work + rx_work);
3814
3815	if (unlikely(np->events & NVREG_IRQ_LINK)) {
3816		spin_lock_irqsave(&np->lock, flags);
3817		nv_link_irq(dev);
3818		spin_unlock_irqrestore(&np->lock, flags);
3819	}
3820	if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3821		spin_lock_irqsave(&np->lock, flags);
3822		nv_linkchange(dev);
3823		spin_unlock_irqrestore(&np->lock, flags);
3824		np->link_timeout = jiffies + LINK_TIMEOUT;
3825	}
3826	if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3827		spin_lock_irqsave(&np->lock, flags);
3828		if (!np->in_shutdown) {
3829			np->nic_poll_irq = np->irqmask;
3830			np->recover_error = 1;
3831			mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3832		}
3833		spin_unlock_irqrestore(&np->lock, flags);
3834		napi_complete(napi);
3835		return rx_work;
3836	}
3837
3838	if (rx_work < budget) {
3839		/* re-enable interrupts
3840		 * (msix not enabled in napi) */
3841		napi_complete_done(napi, rx_work);
3842
3843		writel(np->irqmask, base + NvRegIrqMask);
3844	}
3845	return rx_work;
3846}
3847
3848static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3849{
3850	struct net_device *dev = (struct net_device *) data;
3851	struct fe_priv *np = netdev_priv(dev);
3852	u8 __iomem *base = get_hwbase(dev);
3853	u32 events;
3854	int i;
3855	unsigned long flags;
3856
3857	for (i = 0;; i++) {
3858		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3859		writel(events, base + NvRegMSIXIrqStatus);
3860		netdev_dbg(dev, "rx irq events: %08x\n", events);
3861		if (!(events & np->irqmask))
3862			break;
3863
3864		if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3865			if (unlikely(nv_alloc_rx_optimized(dev))) {
3866				spin_lock_irqsave(&np->lock, flags);
3867				if (!np->in_shutdown)
3868					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3869				spin_unlock_irqrestore(&np->lock, flags);
3870			}
3871		}
3872
3873		if (unlikely(i > max_interrupt_work)) {
3874			spin_lock_irqsave(&np->lock, flags);
3875			/* disable interrupts on the nic */
3876			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3877			pci_push(base);
3878
3879			if (!np->in_shutdown) {
3880				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3881				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3882			}
3883			spin_unlock_irqrestore(&np->lock, flags);
3884			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3885				   __func__, i);
3886			break;
3887		}
3888	}
3889
3890	return IRQ_RETVAL(i);
3891}
3892
3893static irqreturn_t nv_nic_irq_other(int foo, void *data)
3894{
3895	struct net_device *dev = (struct net_device *) data;
3896	struct fe_priv *np = netdev_priv(dev);
3897	u8 __iomem *base = get_hwbase(dev);
3898	u32 events;
3899	int i;
3900	unsigned long flags;
3901
3902	for (i = 0;; i++) {
3903		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3904		writel(events, base + NvRegMSIXIrqStatus);
3905		netdev_dbg(dev, "irq events: %08x\n", events);
3906		if (!(events & np->irqmask))
3907			break;
3908
3909		/* check tx in case we reached max loop limit in tx isr */
3910		spin_lock_irqsave(&np->lock, flags);
3911		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3912		spin_unlock_irqrestore(&np->lock, flags);
3913
3914		if (events & NVREG_IRQ_LINK) {
3915			spin_lock_irqsave(&np->lock, flags);
3916			nv_link_irq(dev);
3917			spin_unlock_irqrestore(&np->lock, flags);
3918		}
3919		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3920			spin_lock_irqsave(&np->lock, flags);
3921			nv_linkchange(dev);
3922			spin_unlock_irqrestore(&np->lock, flags);
3923			np->link_timeout = jiffies + LINK_TIMEOUT;
3924		}
3925		if (events & NVREG_IRQ_RECOVER_ERROR) {
3926			spin_lock_irqsave(&np->lock, flags);
3927			/* disable interrupts on the nic */
3928			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3929			pci_push(base);
3930
3931			if (!np->in_shutdown) {
3932				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3933				np->recover_error = 1;
3934				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3935			}
3936			spin_unlock_irqrestore(&np->lock, flags);
3937			break;
3938		}
3939		if (unlikely(i > max_interrupt_work)) {
3940			spin_lock_irqsave(&np->lock, flags);
3941			/* disable interrupts on the nic */
3942			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3943			pci_push(base);
3944
3945			if (!np->in_shutdown) {
3946				np->nic_poll_irq |= NVREG_IRQ_OTHER;
3947				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3948			}
3949			spin_unlock_irqrestore(&np->lock, flags);
3950			netdev_dbg(dev, "%s: too many iterations (%d)\n",
3951				   __func__, i);
3952			break;
3953		}
3954
3955	}
3956
3957	return IRQ_RETVAL(i);
3958}
3959
3960static irqreturn_t nv_nic_irq_test(int foo, void *data)
3961{
3962	struct net_device *dev = (struct net_device *) data;
3963	struct fe_priv *np = netdev_priv(dev);
3964	u8 __iomem *base = get_hwbase(dev);
3965	u32 events;
3966
3967	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3968		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3969		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3970	} else {
3971		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3972		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3973	}
3974	pci_push(base);
3975	if (!(events & NVREG_IRQ_TIMER))
3976		return IRQ_RETVAL(0);
3977
3978	nv_msi_workaround(np);
3979
3980	spin_lock(&np->lock);
3981	np->intr_test = 1;
3982	spin_unlock(&np->lock);
3983
3984	return IRQ_RETVAL(1);
3985}
3986
3987static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3988{
3989	u8 __iomem *base = get_hwbase(dev);
3990	int i;
3991	u32 msixmap = 0;
3992
3993	/* Each interrupt bit can be mapped to an MSI-X vector (4 bits).
3994	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3995	 * the remaining 8 interrupts.
3996	 */
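	/* Worked example: mapping vector 1 to an irqmask of 0x11 (bits 0
	 * and 4 set) gives msixmap = (1 << 0) | (1 << 16) = 0x00010001,
	 * i.e. nibbles 0 and 4 of NvRegMSIXMap0 select vector 1.
	 */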
3997	for (i = 0; i < 8; i++) {
3998		if ((irqmask >> i) & 0x1)
3999			msixmap |= vector << (i << 2);
4000	}
4001	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
4002
4003	msixmap = 0;
4004	for (i = 0; i < 8; i++) {
4005		if ((irqmask >> (i + 8)) & 0x1)
4006			msixmap |= vector << (i << 2);
4007	}
4008	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
4009}
4010
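/* Set up the device interrupt: try MSI-X first (with separate rx, tx
 * and "other" vectors in throughput mode), fall back to single-vector
 * MSI, and finally to the legacy INTx line. Returns 0 on success, 1 on
 * failure.
 */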
4011static int nv_request_irq(struct net_device *dev, int intr_test)
4012{
4013	struct fe_priv *np = get_nvpriv(dev);
4014	u8 __iomem *base = get_hwbase(dev);
4015	int ret;
4016	int i;
4017	irqreturn_t (*handler)(int foo, void *data);
4018
4019	if (intr_test) {
4020		handler = nv_nic_irq_test;
4021	} else {
4022		if (nv_optimized(np))
4023			handler = nv_nic_irq_optimized;
4024		else
4025			handler = nv_nic_irq;
4026	}
4027
4028	if (np->msi_flags & NV_MSI_X_CAPABLE) {
4029		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4030			np->msi_x_entry[i].entry = i;
4031		ret = pci_enable_msix_range(np->pci_dev,
4032					    np->msi_x_entry,
4033					    np->msi_flags & NV_MSI_X_VECTORS_MASK,
4034					    np->msi_flags & NV_MSI_X_VECTORS_MASK);
4035		if (ret > 0) {
4036			np->msi_flags |= NV_MSI_X_ENABLED;
4037			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
4038				/* Request irq for rx handling */
4039				sprintf(np->name_rx, "%s-rx", dev->name);
4040				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
4041						  nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
4042				if (ret) {
4043					netdev_info(dev,
4044						    "request_irq failed for rx %d\n",
4045						    ret);
4046					pci_disable_msix(np->pci_dev);
4047					np->msi_flags &= ~NV_MSI_X_ENABLED;
4048					goto out_err;
4049				}
4050				/* Request irq for tx handling */
4051				sprintf(np->name_tx, "%s-tx", dev->name);
4052				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
4053						  nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
4054				if (ret) {
4055					netdev_info(dev,
4056						    "request_irq failed for tx %d\n",
4057						    ret);
4058					pci_disable_msix(np->pci_dev);
4059					np->msi_flags &= ~NV_MSI_X_ENABLED;
4060					goto out_free_rx;
4061				}
4062				/* Request irq for link and timer handling */
4063				sprintf(np->name_other, "%s-other", dev->name);
4064				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
4065						  nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
4066				if (ret) {
4067					netdev_info(dev,
4068						    "request_irq failed for link %d\n",
4069						    ret);
4070					pci_disable_msix(np->pci_dev);
4071					np->msi_flags &= ~NV_MSI_X_ENABLED;
4072					goto out_free_tx;
4073				}
4074				/* map interrupts to their respective vector */
4075				writel(0, base + NvRegMSIXMap0);
4076				writel(0, base + NvRegMSIXMap1);
4077				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
4078				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
4079				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
4080			} else {
4081				/* Request irq for all interrupts */
4082				ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4083						  handler, IRQF_SHARED, dev->name, dev);
4084				if (ret) {
4085					netdev_info(dev,
4086						    "request_irq failed %d\n",
4087						    ret);
4088					pci_disable_msix(np->pci_dev);
4089					np->msi_flags &= ~NV_MSI_X_ENABLED;
4090					goto out_err;
4091				}
4092
4093				/* map interrupts to vector 0 */
4094				writel(0, base + NvRegMSIXMap0);
4095				writel(0, base + NvRegMSIXMap1);
4096			}
4097			netdev_info(dev, "MSI-X enabled\n");
4098			return 0;
4099		}
4100	}
4101	if (np->msi_flags & NV_MSI_CAPABLE) {
4102		ret = pci_enable_msi(np->pci_dev);
4103		if (ret == 0) {
4104			np->msi_flags |= NV_MSI_ENABLED;
4105			ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4106			if (ret) {
4107				netdev_info(dev, "request_irq failed %d\n",
4108					    ret);
4109				pci_disable_msi(np->pci_dev);
4110				np->msi_flags &= ~NV_MSI_ENABLED;
4111				goto out_err;
4112			}
4113
4114			/* map interrupts to vector 0 */
4115			writel(0, base + NvRegMSIMap0);
4116			writel(0, base + NvRegMSIMap1);
4117			/* enable msi vector 0 */
4118			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4119			netdev_info(dev, "MSI enabled\n");
4120			return 0;
4121		}
4122	}
4123
4124	if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4125		goto out_err;
4126
4127	return 0;
4128out_free_tx:
4129	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
4130out_free_rx:
4131	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
4132out_err:
4133	return 1;
4134}
4135
4136static void nv_free_irq(struct net_device *dev)
4137{
4138	struct fe_priv *np = get_nvpriv(dev);
4139	int i;
4140
4141	if (np->msi_flags & NV_MSI_X_ENABLED) {
4142		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
4143			free_irq(np->msi_x_entry[i].vector, dev);
4144		pci_disable_msix(np->pci_dev);
4145		np->msi_flags &= ~NV_MSI_X_ENABLED;
4146	} else {
4147		free_irq(np->pci_dev->irq, dev);
4148		if (np->msi_flags & NV_MSI_ENABLED) {
4149			pci_disable_msi(np->pci_dev);
4150			np->msi_flags &= ~NV_MSI_ENABLED;
4151		}
4152	}
4153}
4154
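/* Deferred poll handler: runs when interrupt sources were masked
 * because an irq handler exceeded max_interrupt_work, or when a
 * recoverable hw error was flagged. If np->recover_error is set, the
 * nic is reset and the rings are rebuilt before the masked sources are
 * unmasked and their handlers are invoked manually.
 */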
4155static void nv_do_nic_poll(struct timer_list *t)
4156{
4157	struct fe_priv *np = from_timer(np, t, nic_poll);
4158	struct net_device *dev = np->dev;
4159	u8 __iomem *base = get_hwbase(dev);
4160	u32 mask = 0;
4161	unsigned long flags;
4162	unsigned int irq = 0;
4163
4164	/*
4165	 * First disable the irq(s) and then re-enable interrupts
4166	 * on the nic. This has to happen before calling nv_nic_irq,
4167	 * because that handler may decide to disable them again.
4168	 */
4169
4170	if (!using_multi_irqs(dev)) {
4171		if (np->msi_flags & NV_MSI_X_ENABLED)
4172			irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector;
4173		else
4174			irq = np->pci_dev->irq;
4175		mask = np->irqmask;
4176	} else {
4177		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4178			irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector;
4179			mask |= NVREG_IRQ_RX_ALL;
4180		}
4181		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4182			irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector;
4183			mask |= NVREG_IRQ_TX_ALL;
4184		}
4185		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4186			irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector;
4187			mask |= NVREG_IRQ_OTHER;
4188		}
4189	}
4190
4191	disable_irq_nosync_lockdep_irqsave(irq, &flags);
4192	synchronize_irq(irq);
4193
4194	if (np->recover_error) {
4195		np->recover_error = 0;
4196		netdev_info(dev, "MAC in recoverable error state\n");
4197		if (netif_running(dev)) {
4198			netif_tx_lock_bh(dev);
4199			netif_addr_lock(dev);
4200			spin_lock(&np->lock);
4201			/* stop engines */
4202			nv_stop_rxtx(dev);
4203			if (np->driver_data & DEV_HAS_POWER_CNTRL)
4204				nv_mac_reset(dev);
4205			nv_txrx_reset(dev);
4206			/* drain rx/tx queues */
4207			nv_drain_rxtx(dev);
4208			/* reinit driver view of the rx queue */
4209			set_bufsize(dev);
4210			if (nv_init_ring(dev)) {
4211				if (!np->in_shutdown)
4212					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4213			}
4214			/* reinit nic view of the rx queue */
4215			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4216			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4217			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4218				base + NvRegRingSizes);
4219			pci_push(base);
4220			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4221			pci_push(base);
4222			/* clear interrupts */
4223			if (!(np->msi_flags & NV_MSI_X_ENABLED))
4224				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4225			else
4226				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4227
4228			/* restart rx/tx engines */
4229			nv_start_rxtx(dev);
4230			spin_unlock(&np->lock);
4231			netif_addr_unlock(dev);
4232			netif_tx_unlock_bh(dev);
4233		}
4234	}
4235
4236	writel(mask, base + NvRegIrqMask);
4237	pci_push(base);
4238
4239	if (!using_multi_irqs(dev)) {
4240		np->nic_poll_irq = 0;
4241		if (nv_optimized(np))
4242			nv_nic_irq_optimized(0, dev);
4243		else
4244			nv_nic_irq(0, dev);
4245	} else {
4246		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
4247			np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
4248			nv_nic_irq_rx(0, dev);
4249		}
4250		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
4251			np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
4252			nv_nic_irq_tx(0, dev);
4253		}
4254		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
4255			np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
4256			nv_nic_irq_other(0, dev);
4257		}
4258	}
4259
4260	enable_irq_lockdep_irqrestore(irq, &flags);
4261}
4262
4263#ifdef CONFIG_NET_POLL_CONTROLLER
4264static void nv_poll_controller(struct net_device *dev)
4265{
4266	struct fe_priv *np = netdev_priv(dev);
4267
4268	nv_do_nic_poll(&np->nic_poll);
4269}
4270#endif
4271
4272static void nv_do_stats_poll(struct timer_list *t)
4273	__acquires(&netdev_priv(dev)->hwstats_lock)
4274	__releases(&netdev_priv(dev)->hwstats_lock)
4275{
4276	struct fe_priv *np = from_timer(np, t, stats_poll);
4277	struct net_device *dev = np->dev;
4278
4279	/* If the lock is currently taken, the stats are already being
4280	 * refreshed and are therefore fresh enough */
4281	if (spin_trylock(&np->hwstats_lock)) {
4282		nv_update_stats(dev);
4283		spin_unlock(&np->hwstats_lock);
4284	}
4285
4286	if (!np->in_shutdown)
4287		mod_timer(&np->stats_poll,
4288			round_jiffies(jiffies + STATS_INTERVAL));
4289}
4290
4291static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4292{
4293	struct fe_priv *np = netdev_priv(dev);
4294	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
4295	strscpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
4296	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
4297}
4298
4299static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4300{
4301	struct fe_priv *np = netdev_priv(dev);
4302	wolinfo->supported = WAKE_MAGIC;
4303
4304	spin_lock_irq(&np->lock);
4305	if (np->wolenabled)
4306		wolinfo->wolopts = WAKE_MAGIC;
4307	spin_unlock_irq(&np->lock);
4308}
4309
4310static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4311{
4312	struct fe_priv *np = netdev_priv(dev);
4313	u8 __iomem *base = get_hwbase(dev);
4314	u32 flags = 0;
4315
4316	if (wolinfo->wolopts == 0) {
4317		np->wolenabled = 0;
4318	} else if (wolinfo->wolopts & WAKE_MAGIC) {
4319		np->wolenabled = 1;
4320		flags = NVREG_WAKEUPFLAGS_ENABLE;
4321	}
4322	if (netif_running(dev)) {
4323		spin_lock_irq(&np->lock);
4324		writel(flags, base + NvRegWakeUpFlags);
4325		spin_unlock_irq(&np->lock);
4326	}
4327	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4328	return 0;
4329}
4330
4331static int nv_get_link_ksettings(struct net_device *dev,
4332				 struct ethtool_link_ksettings *cmd)
4333{
4334	struct fe_priv *np = netdev_priv(dev);
4335	u32 speed, supported, advertising;
4336	int adv;
4337
4338	spin_lock_irq(&np->lock);
4339	cmd->base.port = PORT_MII;
4340	if (!netif_running(dev)) {
4341		/* We do not track link speed / duplex setting if the
4342		 * interface is disabled. Force a link check */
4343		if (nv_update_linkspeed(dev)) {
4344			netif_carrier_on(dev);
4345		} else {
4346			netif_carrier_off(dev);
4347		}
4348	}
4349
4350	if (netif_carrier_ok(dev)) {
4351		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4352		case NVREG_LINKSPEED_10:
4353			speed = SPEED_10;
4354			break;
4355		case NVREG_LINKSPEED_100:
4356			speed = SPEED_100;
4357			break;
4358		case NVREG_LINKSPEED_1000:
4359			speed = SPEED_1000;
4360			break;
4361		default:
4362			speed = SPEED_UNKNOWN;
4363			break;
4364		}
4365		cmd->base.duplex = DUPLEX_HALF;
4366		if (np->duplex)
4367			cmd->base.duplex = DUPLEX_FULL;
4368	} else {
4369		speed = SPEED_UNKNOWN;
4370		cmd->base.duplex = DUPLEX_UNKNOWN;
4371	}
4372	cmd->base.speed = speed;
4373	cmd->base.autoneg = np->autoneg;
4374
4375	advertising = ADVERTISED_MII;
4376	if (np->autoneg) {
4377		advertising |= ADVERTISED_Autoneg;
4378		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4379		if (adv & ADVERTISE_10HALF)
4380			advertising |= ADVERTISED_10baseT_Half;
4381		if (adv & ADVERTISE_10FULL)
4382			advertising |= ADVERTISED_10baseT_Full;
4383		if (adv & ADVERTISE_100HALF)
4384			advertising |= ADVERTISED_100baseT_Half;
4385		if (adv & ADVERTISE_100FULL)
4386			advertising |= ADVERTISED_100baseT_Full;
4387		if (np->gigabit == PHY_GIGABIT) {
4388			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4389			if (adv & ADVERTISE_1000FULL)
4390				advertising |= ADVERTISED_1000baseT_Full;
4391		}
4392	}
4393	supported = (SUPPORTED_Autoneg |
4394		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4395		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4396		SUPPORTED_MII);
4397	if (np->gigabit == PHY_GIGABIT)
4398		supported |= SUPPORTED_1000baseT_Full;
4399
4400	cmd->base.phy_address = np->phyaddr;
4401
4402	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4403						supported);
4404	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4405						advertising);
4406
4407	/* ignore maxtxpkt, maxrxpkt for now */
4408	spin_unlock_irq(&np->lock);
4409	return 0;
4410}
4411
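/* ethtool set_link_ksettings: validate the request, quiesce the
 * engines, program the PHY advertisement and BMCR accordingly, then
 * restart. With autoneg enabled at least one supported mode must be
 * advertised; forced mode is limited to 10/100 Mbit/s.
 */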
4412static int nv_set_link_ksettings(struct net_device *dev,
4413				 const struct ethtool_link_ksettings *cmd)
4414{
4415	struct fe_priv *np = netdev_priv(dev);
4416	u32 speed = cmd->base.speed;
4417	u32 advertising;
4418
4419	ethtool_convert_link_mode_to_legacy_u32(&advertising,
4420						cmd->link_modes.advertising);
4421
4422	if (cmd->base.port != PORT_MII)
4423		return -EINVAL;
4424	if (cmd->base.phy_address != np->phyaddr) {
4425		/* TODO: support switching between multiple phys. Should be
4426		 * trivial, but not enabled due to lack of test hardware. */
4427		return -EINVAL;
4428	}
4429	if (cmd->base.autoneg == AUTONEG_ENABLE) {
4430		u32 mask;
4431
4432		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4433			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4434		if (np->gigabit == PHY_GIGABIT)
4435			mask |= ADVERTISED_1000baseT_Full;
4436
4437		if ((advertising & mask) == 0)
4438			return -EINVAL;
4439
4440	} else if (cmd->base.autoneg == AUTONEG_DISABLE) {
4441		/* Note: forcing 1000 Mbit/s with autonegotiation disabled is
4442		 * intentionally forbidden - no one should need that. */
4443
4444		if (speed != SPEED_10 && speed != SPEED_100)
4445			return -EINVAL;
4446		if (cmd->base.duplex != DUPLEX_HALF &&
4447		    cmd->base.duplex != DUPLEX_FULL)
4448			return -EINVAL;
4449	} else {
4450		return -EINVAL;
4451	}
4452
4453	netif_carrier_off(dev);
4454	if (netif_running(dev)) {
4455		unsigned long flags;
4456
4457		nv_disable_irq(dev);
4458		netif_tx_lock_bh(dev);
4459		netif_addr_lock(dev);
4460		/* with plain spinlock lockdep complains */
4461		spin_lock_irqsave(&np->lock, flags);
4462		/* stop engines */
4463		/* FIXME:
4464		 * this can take some time, and interrupts are disabled
4465		 * due to spin_lock_irqsave, but let's hope no daemon
4466		 * changes the settings very often...
4467		 * Worst case:
4468		 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4469		 * + some minor delays, approximately up to a second.
4470		 */
4471		nv_stop_rxtx(dev);
4472		spin_unlock_irqrestore(&np->lock, flags);
4473		netif_addr_unlock(dev);
4474		netif_tx_unlock_bh(dev);
4475	}
4476
4477	if (cmd->base.autoneg == AUTONEG_ENABLE) {
4478		int adv, bmcr;
4479
4480		np->autoneg = 1;
4481
4482		/* advertise only what has been requested */
4483		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4484		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4485		if (advertising & ADVERTISED_10baseT_Half)
4486			adv |= ADVERTISE_10HALF;
4487		if (advertising & ADVERTISED_10baseT_Full)
4488			adv |= ADVERTISE_10FULL;
4489		if (advertising & ADVERTISED_100baseT_Half)
4490			adv |= ADVERTISE_100HALF;
4491		if (advertising & ADVERTISED_100baseT_Full)
4492			adv |= ADVERTISE_100FULL;
4493		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4494			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4495		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4496			adv |= ADVERTISE_PAUSE_ASYM;
4497		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4498
4499		if (np->gigabit == PHY_GIGABIT) {
4500			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4501			adv &= ~ADVERTISE_1000FULL;
4502			if (advertising & ADVERTISED_1000baseT_Full)
4503				adv |= ADVERTISE_1000FULL;
4504			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4505		}
4506
4507		if (netif_running(dev))
4508			netdev_info(dev, "link down\n");
4509		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4510		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4511			bmcr |= BMCR_ANENABLE;
4512			/* reset the phy in order for settings to stick,
4513			 * and cause autoneg to start */
4514			if (phy_reset(dev, bmcr)) {
4515				netdev_info(dev, "phy reset failed\n");
4516				return -EINVAL;
4517			}
4518		} else {
4519			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4520			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4521		}
4522	} else {
4523		int adv, bmcr;
4524
4525		np->autoneg = 0;
4526
4527		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4528		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4529		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
4530			adv |= ADVERTISE_10HALF;
4531		if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
4532			adv |= ADVERTISE_10FULL;
4533		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
4534			adv |= ADVERTISE_100HALF;
4535		if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
4536			adv |= ADVERTISE_100FULL;
4537		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4538		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4539			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4540			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4541		}
4542		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4543			adv |= ADVERTISE_PAUSE_ASYM;
4544			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4545		}
4546		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4547		np->fixed_mode = adv;
4548
4549		if (np->gigabit == PHY_GIGABIT) {
4550			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4551			adv &= ~ADVERTISE_1000FULL;
4552			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4553		}
4554
4555		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4556		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4557		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4558			bmcr |= BMCR_FULLDPLX;
4559		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4560			bmcr |= BMCR_SPEED100;
4561		if (np->phy_oui == PHY_OUI_MARVELL) {
4562			/* reset the phy in order for forced mode settings to stick */
4563			if (phy_reset(dev, bmcr)) {
4564				netdev_info(dev, "phy reset failed\n");
4565				return -EINVAL;
4566			}
4567		} else {
4568			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4569			if (netif_running(dev)) {
4570				/* Wait a bit and then reconfigure the nic. */
4571				udelay(10);
4572				nv_linkchange(dev);
4573			}
4574		}
4575	}
4576
4577	if (netif_running(dev)) {
4578		nv_start_rxtx(dev);
4579		nv_enable_irq(dev);
4580	}
4581
4582	return 0;
4583}
4584
4585#define FORCEDETH_REGS_VER	1
4586
4587static int nv_get_regs_len(struct net_device *dev)
4588{
4589	struct fe_priv *np = netdev_priv(dev);
4590	return np->register_size;
4591}
4592
4593static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4594{
4595	struct fe_priv *np = netdev_priv(dev);
4596	u8 __iomem *base = get_hwbase(dev);
4597	u32 *rbuf = buf;
4598	int i;
4599
4600	regs->version = FORCEDETH_REGS_VER;
4601	spin_lock_irq(&np->lock);
4602	for (i = 0; i < np->register_size/sizeof(u32); i++)
4603		rbuf[i] = readl(base + i*sizeof(u32));
4604	spin_unlock_irq(&np->lock);
4605}
4606
4607static int nv_nway_reset(struct net_device *dev)
4608{
4609	struct fe_priv *np = netdev_priv(dev);
4610	int ret;
4611
4612	if (np->autoneg) {
4613		int bmcr;
4614
4615		netif_carrier_off(dev);
4616		if (netif_running(dev)) {
4617			nv_disable_irq(dev);
4618			netif_tx_lock_bh(dev);
4619			netif_addr_lock(dev);
4620			spin_lock(&np->lock);
4621			/* stop engines */
4622			nv_stop_rxtx(dev);
4623			spin_unlock(&np->lock);
4624			netif_addr_unlock(dev);
4625			netif_tx_unlock_bh(dev);
4626			netdev_info(dev, "link down\n");
4627		}
4628
4629		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4630		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4631			bmcr |= BMCR_ANENABLE;
4632			/* reset the phy in order for settings to stick */
4633			if (phy_reset(dev, bmcr)) {
4634				netdev_info(dev, "phy reset failed\n");
4635				return -EINVAL;
4636			}
4637		} else {
4638			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4639			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4640		}
4641
4642		if (netif_running(dev)) {
4643			nv_start_rxtx(dev);
4644			nv_enable_irq(dev);
4645		}
4646		ret = 0;
4647	} else {
4648		ret = -EINVAL;
4649	}
4650
4651	return ret;
4652}
4653
4654static void nv_get_ringparam(struct net_device *dev,
4655			     struct ethtool_ringparam *ring,
4656			     struct kernel_ethtool_ringparam *kernel_ring,
4657			     struct netlink_ext_ack *extack)
4658{
4659	struct fe_priv *np = netdev_priv(dev);
4660
4661	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4662	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4663
4664	ring->rx_pending = np->rx_ring_size;
4665	ring->tx_pending = np->tx_ring_size;
4666}
4667
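/* Resize the rx/tx rings. The new descriptor rings and skb maps are
 * allocated before the engines are stopped, so an allocation failure
 * leaves the currently active rings untouched.
 */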
4668static int nv_set_ringparam(struct net_device *dev,
4669			    struct ethtool_ringparam *ring,
4670			    struct kernel_ethtool_ringparam *kernel_ring,
4671			    struct netlink_ext_ack *extack)
4672{
4673	struct fe_priv *np = netdev_priv(dev);
4674	u8 __iomem *base = get_hwbase(dev);
4675	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4676	dma_addr_t ring_addr;
4677
4678	if (ring->rx_pending < RX_RING_MIN ||
4679	    ring->tx_pending < TX_RING_MIN ||
4680	    ring->rx_mini_pending != 0 ||
4681	    ring->rx_jumbo_pending != 0 ||
4682	    (np->desc_ver == DESC_VER_1 &&
4683	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4684	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4685	    (np->desc_ver != DESC_VER_1 &&
4686	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4687	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4688		return -EINVAL;
4689	}
4690
4691	/* allocate new rings */
4692	if (!nv_optimized(np)) {
4693		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4694					       sizeof(struct ring_desc) *
4695					       (ring->rx_pending +
4696					       ring->tx_pending),
4697					       &ring_addr, GFP_ATOMIC);
4698	} else {
4699		rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev,
4700					       sizeof(struct ring_desc_ex) *
4701					       (ring->rx_pending +
4702					       ring->tx_pending),
4703					       &ring_addr, GFP_ATOMIC);
4704	}
4705	rx_skbuff = kmalloc_array(ring->rx_pending, sizeof(struct nv_skb_map),
4706				  GFP_KERNEL);
4707	tx_skbuff = kmalloc_array(ring->tx_pending, sizeof(struct nv_skb_map),
4708				  GFP_KERNEL);
4709	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4710		/* fall back to old rings */
4711		if (!nv_optimized(np)) {
4712			if (rxtx_ring)
4713				dma_free_coherent(&np->pci_dev->dev,
4714						  sizeof(struct ring_desc) *
4715						  (ring->rx_pending +
4716						  ring->tx_pending),
4717						  rxtx_ring, ring_addr);
4718		} else {
4719			if (rxtx_ring)
4720				dma_free_coherent(&np->pci_dev->dev,
4721						  sizeof(struct ring_desc_ex) *
4722						  (ring->rx_pending +
4723						  ring->tx_pending),
4724						  rxtx_ring, ring_addr);
4725		}
4726
4727		kfree(rx_skbuff);
4728		kfree(tx_skbuff);
4729		goto exit;
4730	}
4731
4732	if (netif_running(dev)) {
4733		nv_disable_irq(dev);
4734		nv_napi_disable(dev);
4735		netif_tx_lock_bh(dev);
4736		netif_addr_lock(dev);
4737		spin_lock(&np->lock);
4738		/* stop engines */
4739		nv_stop_rxtx(dev);
4740		nv_txrx_reset(dev);
4741		/* drain queues */
4742		nv_drain_rxtx(dev);
4743		/* delete queues */
4744		free_rings(dev);
4745	}
4746
4747	/* set new values */
4748	np->rx_ring_size = ring->rx_pending;
4749	np->tx_ring_size = ring->tx_pending;
4750
4751	if (!nv_optimized(np)) {
4752		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4753		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4754	} else {
4755		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4756		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4757	}
4758	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4759	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4760	np->ring_addr = ring_addr;
4761
4762	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4763	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4764
4765	if (netif_running(dev)) {
4766		/* reinit driver view of the queues */
4767		set_bufsize(dev);
4768		if (nv_init_ring(dev)) {
4769			if (!np->in_shutdown)
4770				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4771		}
4772
4773		/* reinit nic view of the queues */
4774		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4775		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4776		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4777			base + NvRegRingSizes);
4778		pci_push(base);
4779		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4780		pci_push(base);
4781
4782		/* restart engines */
4783		nv_start_rxtx(dev);
4784		spin_unlock(&np->lock);
4785		netif_addr_unlock(dev);
4786		netif_tx_unlock_bh(dev);
4787		nv_napi_enable(dev);
4788		nv_enable_irq(dev);
4789	}
4790	return 0;
4791exit:
4792	return -ENOMEM;
4793}
4794
4795static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4796{
4797	struct fe_priv *np = netdev_priv(dev);
4798
4799	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4800	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4801	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4802}
4803
4804static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4805{
4806	struct fe_priv *np = netdev_priv(dev);
4807	int adv, bmcr;
4808
4809	if ((!np->autoneg && np->duplex == 0) ||
4810	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4811		netdev_info(dev, "cannot set pause settings when forced link is in half duplex\n");
4812		return -EINVAL;
4813	}
4814	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4815		netdev_info(dev, "hardware does not support tx pause frames\n");
4816		return -EINVAL;
4817	}
4818
4819	netif_carrier_off(dev);
4820	if (netif_running(dev)) {
4821		nv_disable_irq(dev);
4822		netif_tx_lock_bh(dev);
4823		netif_addr_lock(dev);
4824		spin_lock(&np->lock);
4825		/* stop engines */
4826		nv_stop_rxtx(dev);
4827		spin_unlock(&np->lock);
4828		netif_addr_unlock(dev);
4829		netif_tx_unlock_bh(dev);
4830	}
4831
4832	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4833	if (pause->rx_pause)
4834		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4835	if (pause->tx_pause)
4836		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4837
4838	if (np->autoneg && pause->autoneg) {
4839		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4840
4841		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4842		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4843		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4844			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4845		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4846			adv |= ADVERTISE_PAUSE_ASYM;
4847		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4848
4849		if (netif_running(dev))
4850			netdev_info(dev, "link down\n");
4851		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4852		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4853		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4854	} else {
4855		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4856		if (pause->rx_pause)
4857			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4858		if (pause->tx_pause)
4859			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4860
4861		if (!netif_running(dev))
4862			nv_update_linkspeed(dev);
4863		else
4864			nv_update_pause(dev, np->pause_flags);
4865	}
4866
4867	if (netif_running(dev)) {
4868		nv_start_rxtx(dev);
4869		nv_enable_irq(dev);
4870	}
4871	return 0;
4872}
4873
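/* Toggle internal PHY loopback via BMCR_LOOPBACK. Entering loopback
 * forces 1000 Mbit/s full duplex and carrier-on so traffic can flow
 * without a link partner; leaving it re-runs phy_init() to restore the
 * normal PHY configuration.
 */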
4874static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4875{
4876	struct fe_priv *np = netdev_priv(dev);
4877	unsigned long flags;
4878	u32 miicontrol;
4879	int err, retval = 0;
4880
4881	spin_lock_irqsave(&np->lock, flags);
4882	miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4883	if (features & NETIF_F_LOOPBACK) {
4884		if (miicontrol & BMCR_LOOPBACK) {
4885			spin_unlock_irqrestore(&np->lock, flags);
4886			netdev_info(dev, "Loopback already enabled\n");
4887			return 0;
4888		}
4889		nv_disable_irq(dev);
4890		/* Turn on loopback mode */
4891		miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4892		err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4893		if (err) {
4894			retval = PHY_ERROR;
4895			spin_unlock_irqrestore(&np->lock, flags);
4896			phy_init(dev);
4897		} else {
4898			if (netif_running(dev)) {
4899				/* Force 1000 Mbps full-duplex */
4900				nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4901						   1);
4902				/* Force link up */
4903				netif_carrier_on(dev);
4904			}
4905			spin_unlock_irqrestore(&np->lock, flags);
4906			netdev_info(dev,
4907				"Internal PHY loopback mode enabled.\n");
4908		}
4909	} else {
4910		if (!(miicontrol & BMCR_LOOPBACK)) {
4911			spin_unlock_irqrestore(&np->lock, flags);
4912			netdev_info(dev, "Loopback already disabled\n");
4913			return 0;
4914		}
4915		nv_disable_irq(dev);
4916		/* Turn off loopback */
4917		spin_unlock_irqrestore(&np->lock, flags);
4918		netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4919		phy_init(dev);
4920	}
4921	msleep(500);
4922	spin_lock_irqsave(&np->lock, flags);
4923	nv_enable_irq(dev);
4924	spin_unlock_irqrestore(&np->lock, flags);
4925
4926	return retval;
4927}
4928
4929static netdev_features_t nv_fix_features(struct net_device *dev,
4930	netdev_features_t features)
4931{
4932	/* vlan is dependent on rx checksum offload */
4933	if (features & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
4934		features |= NETIF_F_RXCSUM;
4935
4936	return features;
4937}
4938
4939static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4940{
4941	struct fe_priv *np = get_nvpriv(dev);
4942
4943	spin_lock_irq(&np->lock);
4944
4945	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4946		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4947	else
4948		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4949
4950	if (features & NETIF_F_HW_VLAN_CTAG_TX)
4951		np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4952	else
4953		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4954
4955	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4956
4957	spin_unlock_irq(&np->lock);
4958}
4959
4960static int nv_set_features(struct net_device *dev, netdev_features_t features)
4961{
4962	struct fe_priv *np = netdev_priv(dev);
4963	u8 __iomem *base = get_hwbase(dev);
4964	netdev_features_t changed = dev->features ^ features;
4965	int retval;
4966
4967	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4968		retval = nv_set_loopback(dev, features);
4969		if (retval != 0)
4970			return retval;
4971	}
4972
4973	if (changed & NETIF_F_RXCSUM) {
4974		spin_lock_irq(&np->lock);
4975
4976		if (features & NETIF_F_RXCSUM)
4977			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4978		else
4979			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4980
4981		if (netif_running(dev))
4982			writel(np->txrxctl_bits, base + NvRegTxRxControl);
4983
4984		spin_unlock_irq(&np->lock);
4985	}
4986
4987	if (changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))
4988		nv_vlan_mode(dev, features);
4989
4990	return 0;
4991}
4992
4993static int nv_get_sset_count(struct net_device *dev, int sset)
4994{
4995	struct fe_priv *np = netdev_priv(dev);
4996
4997	switch (sset) {
4998	case ETH_SS_TEST:
4999		if (np->driver_data & DEV_HAS_TEST_EXTENDED)
5000			return NV_TEST_COUNT_EXTENDED;
5001		else
5002			return NV_TEST_COUNT_BASE;
5003	case ETH_SS_STATS:
5004		if (np->driver_data & DEV_HAS_STATISTICS_V3)
5005			return NV_DEV_STATISTICS_V3_COUNT;
5006		else if (np->driver_data & DEV_HAS_STATISTICS_V2)
5007			return NV_DEV_STATISTICS_V2_COUNT;
5008		else if (np->driver_data & DEV_HAS_STATISTICS_V1)
5009			return NV_DEV_STATISTICS_V1_COUNT;
5010		else
5011			return 0;
5012	default:
5013		return -EOPNOTSUPP;
5014	}
5015}
5016
5017static void nv_get_ethtool_stats(struct net_device *dev,
5018				 struct ethtool_stats *estats, u64 *buffer)
5019	__acquires(&netdev_priv(dev)->hwstats_lock)
5020	__releases(&netdev_priv(dev)->hwstats_lock)
5021{
5022	struct fe_priv *np = netdev_priv(dev);
5023
5024	spin_lock_bh(&np->hwstats_lock);
5025	nv_update_stats(dev);
5026	memcpy(buffer, &np->estats,
5027	       nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
5028	spin_unlock_bh(&np->hwstats_lock);
5029}
5030
5031static int nv_link_test(struct net_device *dev)
5032{
5033	struct fe_priv *np = netdev_priv(dev);
5034	int mii_status;
5035
5036	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5037	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5038
5039	/* check phy link status */
5040	if (!(mii_status & BMSR_LSTATUS))
5041		return 0;
5042	else
5043		return 1;
5044}
5045
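/* Register self-test: for each entry in nv_registers_test, toggle the
 * maskable bits and verify that the new value reads back; passing
 * registers are restored to their original value. Returns 1 if all
 * registers pass, 0 on the first mismatch.
 */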
5046static int nv_register_test(struct net_device *dev)
5047{
5048	u8 __iomem *base = get_hwbase(dev);
5049	int i = 0;
5050	u32 orig_read, new_read;
5051
5052	do {
5053		orig_read = readl(base + nv_registers_test[i].reg);
5054
5055		/* xor with mask to toggle bits */
5056		orig_read ^= nv_registers_test[i].mask;
5057
5058		writel(orig_read, base + nv_registers_test[i].reg);
5059
5060		new_read = readl(base + nv_registers_test[i].reg);
5061
5062		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
5063			return 0;
5064
5065		/* restore original value */
5066		orig_read ^= nv_registers_test[i].mask;
5067		writel(orig_read, base + nv_registers_test[i].reg);
5068
5069	} while (nv_registers_test[++i].reg != 0);
5070
5071	return 1;
5072}
5073
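/* Interrupt self-test: temporarily switch to a single test vector, arm
 * the timer interrupt and wait 100 ms for nv_nic_irq_test to set
 * np->intr_test. Returns 1 on pass, 2 if no interrupt arrived, 0 if
 * the irq could not be (re)acquired.
 */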
5074static int nv_interrupt_test(struct net_device *dev)
5075{
5076	struct fe_priv *np = netdev_priv(dev);
5077	u8 __iomem *base = get_hwbase(dev);
5078	int ret = 1;
5079	int testcnt;
5080	u32 save_msi_flags, save_poll_interval = 0;
5081
5082	if (netif_running(dev)) {
5083		/* free current irq */
5084		nv_free_irq(dev);
5085		save_poll_interval = readl(base+NvRegPollingInterval);
5086	}
5087
5088	/* flag to test interrupt handler */
5089	np->intr_test = 0;
5090
5091	/* setup test irq */
5092	save_msi_flags = np->msi_flags;
5093	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
5094	np->msi_flags |= 0x001; /* setup 1 vector */
5095	if (nv_request_irq(dev, 1))
5096		return 0;
5097
5098	/* setup timer interrupt */
5099	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5100	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5101
5102	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5103
5104	/* wait for at least one interrupt */
5105	msleep(100);
5106
5107	spin_lock_irq(&np->lock);
5108
5109	/* flag should be set within ISR */
5110	testcnt = np->intr_test;
5111	if (!testcnt)
5112		ret = 2;
5113
5114	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
5115	if (!(np->msi_flags & NV_MSI_X_ENABLED))
5116		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5117	else
5118		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5119
5120	spin_unlock_irq(&np->lock);
5121
5122	nv_free_irq(dev);
5123
5124	np->msi_flags = save_msi_flags;
5125
5126	if (netif_running(dev)) {
5127		writel(save_poll_interval, base + NvRegPollingInterval);
5128		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5129		/* restore original irq */
5130		if (nv_request_irq(dev, 0))
5131			return 0;
5132	}
5133
5134	return ret;
5135}
5136
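/* MAC loopback self-test: enable NVREG_PFF_LOOPBACK, transmit a single
 * ETH_DATA_LEN packet filled with the repeating byte pattern
 * 0x00..0xff, and verify that the same bytes arrive on the rx ring.
 * Returns 1 on pass, 0 on failure.
 */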
5137static int nv_loopback_test(struct net_device *dev)
5138{
5139	struct fe_priv *np = netdev_priv(dev);
5140	u8 __iomem *base = get_hwbase(dev);
5141	struct sk_buff *tx_skb, *rx_skb;
5142	dma_addr_t test_dma_addr;
5143	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
5144	u32 flags;
5145	int len, i, pkt_len;
5146	u8 *pkt_data;
5147	u32 filter_flags = 0;
5148	u32 misc1_flags = 0;
5149	int ret = 1;
5150
5151	if (netif_running(dev)) {
5152		nv_disable_irq(dev);
5153		filter_flags = readl(base + NvRegPacketFilterFlags);
5154		misc1_flags = readl(base + NvRegMisc1);
5155	} else {
5156		nv_txrx_reset(dev);
5157	}
5158
5159	/* reinit driver view of the rx queue */
5160	set_bufsize(dev);
5161	nv_init_ring(dev);
5162
5163	/* setup hardware for loopback */
5164	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
5165	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
5166
5167	/* reinit nic view of the rx queue */
5168	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5169	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5170	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5171		base + NvRegRingSizes);
5172	pci_push(base);
5173
5174	/* restart rx/tx engines */
5175	nv_start_rxtx(dev);
5176
5177	/* setup packet for tx */
5178	pkt_len = ETH_DATA_LEN;
5179	tx_skb = netdev_alloc_skb(dev, pkt_len);
5180	if (!tx_skb) {
5181		ret = 0;
5182		goto out;
5183	}
	/* the buffer is only read by the device here, so map it for tx;
	 * this also matches the DMA_TO_DEVICE unmap at the end of the test */
5184	test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
5185				       skb_tailroom(tx_skb),
5186				       DMA_TO_DEVICE);
5187	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
5188				       test_dma_addr))) {
5189		dev_kfree_skb_any(tx_skb);
5190		goto out;
5191	}
5192	pkt_data = skb_put(tx_skb, pkt_len);
5193	for (i = 0; i < pkt_len; i++)
5194		pkt_data[i] = (u8)(i & 0xff);
5195
5196	if (!nv_optimized(np)) {
5197		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
5198		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5199	} else {
5200		np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
5201		np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
5202		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
5203	}
5204	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5205	pci_push(get_hwbase(dev));
5206
5207	msleep(500);
5208
5209	/* check for rx of the packet */
5210	if (!nv_optimized(np)) {
5211		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
5212		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
5213
5214	} else {
5215		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
5216		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
5217	}
5218
5219	if (flags & NV_RX_AVAIL) {
5220		ret = 0;
5221	} else if (np->desc_ver == DESC_VER_1) {
5222		if (flags & NV_RX_ERROR)
5223			ret = 0;
5224	} else {
5225		if (flags & NV_RX2_ERROR)
5226			ret = 0;
5227	}
5228
5229	if (ret) {
5230		if (len != pkt_len) {
5231			ret = 0;
5232		} else {
5233			rx_skb = np->rx_skb[0].skb;
5234			for (i = 0; i < pkt_len; i++) {
5235				if (rx_skb->data[i] != (u8)(i & 0xff)) {
5236					ret = 0;
5237					break;
5238				}
5239			}
5240		}
5241	}
5242
5243	dma_unmap_single(&np->pci_dev->dev, test_dma_addr,
5244			 (skb_end_pointer(tx_skb) - tx_skb->data),
5245			 DMA_TO_DEVICE);
5246	dev_kfree_skb_any(tx_skb);
5247 out:
5248	/* stop engines */
5249	nv_stop_rxtx(dev);
5250	nv_txrx_reset(dev);
5251	/* drain rx/tx queues */
5252	nv_drain_rxtx(dev);
5253
5254	if (netif_running(dev)) {
5255		writel(misc1_flags, base + NvRegMisc1);
5256		writel(filter_flags, base + NvRegPacketFilterFlags);
5257		nv_enable_irq(dev);
5258	}
5259
5260	return ret;
5261}
5262
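/* ethtool self-test. Result slots: buffer[0] link, buffer[1]
 * registers, buffer[2] interrupt, buffer[3] loopback; a nonzero slot
 * marks a failed test. The offline tests quiesce the nic and restore
 * it afterwards.
 */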
5263static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
5264{
5265	struct fe_priv *np = netdev_priv(dev);
5266	u8 __iomem *base = get_hwbase(dev);
5267	int result, count;
5268
5269	count = nv_get_sset_count(dev, ETH_SS_TEST);
5270	memset(buffer, 0, count * sizeof(u64));
5271
5272	if (!nv_link_test(dev)) {
5273		test->flags |= ETH_TEST_FL_FAILED;
5274		buffer[0] = 1;
5275	}
5276
5277	if (test->flags & ETH_TEST_FL_OFFLINE) {
5278		if (netif_running(dev)) {
5279			netif_stop_queue(dev);
5280			nv_napi_disable(dev);
5281			netif_tx_lock_bh(dev);
5282			netif_addr_lock(dev);
5283			spin_lock_irq(&np->lock);
5284			nv_disable_hw_interrupts(dev, np->irqmask);
5285			if (!(np->msi_flags & NV_MSI_X_ENABLED))
5286				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5287			else
5288				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
5289			/* stop engines */
5290			nv_stop_rxtx(dev);
5291			nv_txrx_reset(dev);
5292			/* drain rx/tx queues */
5293			nv_drain_rxtx(dev);
5294			spin_unlock_irq(&np->lock);
5295			netif_addr_unlock(dev);
5296			netif_tx_unlock_bh(dev);
5297		}
5298
5299		if (!nv_register_test(dev)) {
5300			test->flags |= ETH_TEST_FL_FAILED;
5301			buffer[1] = 1;
5302		}
5303
5304		result = nv_interrupt_test(dev);
5305		if (result != 1) {
5306			test->flags |= ETH_TEST_FL_FAILED;
5307			buffer[2] = 1;
5308		}
5309		if (result == 0) {
5310			/* bail out */
5311			return;
5312		}
5313
5314		if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
5315			test->flags |= ETH_TEST_FL_FAILED;
5316			buffer[3] = 1;
5317		}
5318
5319		if (netif_running(dev)) {
5320			/* reinit driver view of the rx queue */
5321			set_bufsize(dev);
5322			if (nv_init_ring(dev)) {
5323				if (!np->in_shutdown)
5324					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5325			}
5326			/* reinit nic view of the rx queue */
5327			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5328			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5329			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5330				base + NvRegRingSizes);
5331			pci_push(base);
5332			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5333			pci_push(base);
5334			/* restart rx/tx engines */
5335			nv_start_rxtx(dev);
5336			netif_start_queue(dev);
5337			nv_napi_enable(dev);
5338			nv_enable_hw_interrupts(dev, np->irqmask);
5339		}
5340	}
5341}
5342
5343static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
5344{
5345	switch (stringset) {
5346	case ETH_SS_STATS:
5347		memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5348		break;
5349	case ETH_SS_TEST:
5350		memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5351		break;
5352	}
5353}
5354
5355static const struct ethtool_ops ops = {
5356	.get_drvinfo = nv_get_drvinfo,
5357	.get_link = ethtool_op_get_link,
5358	.get_wol = nv_get_wol,
5359	.set_wol = nv_set_wol,
5360	.get_regs_len = nv_get_regs_len,
5361	.get_regs = nv_get_regs,
5362	.nway_reset = nv_nway_reset,
5363	.get_ringparam = nv_get_ringparam,
5364	.set_ringparam = nv_set_ringparam,
5365	.get_pauseparam = nv_get_pauseparam,
5366	.set_pauseparam = nv_set_pauseparam,
5367	.get_strings = nv_get_strings,
5368	.get_ethtool_stats = nv_get_ethtool_stats,
5369	.get_sset_count = nv_get_sset_count,
5370	.self_test = nv_self_test,
5371	.get_ts_info = ethtool_op_get_ts_info,
5372	.get_link_ksettings = nv_get_link_ksettings,
5373	.set_link_ksettings = nv_set_link_ksettings,
5374};
5375
5376/* The mgmt unit and driver use a semaphore to access the phy during init */
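/* Acquisition polls up to ~5 s for the mgmt semaphore to become free,
 * then tries twice to latch the host semaphore bit and read it back.
 * Success is recorded in np->mgmt_sema; returns 1 if acquired, 0
 * otherwise.
 */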
5377static int nv_mgmt_acquire_sema(struct net_device *dev)
5378{
5379	struct fe_priv *np = netdev_priv(dev);
5380	u8 __iomem *base = get_hwbase(dev);
5381	int i;
5382	u32 tx_ctrl, mgmt_sema;
5383
5384	for (i = 0; i < 10; i++) {
5385		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5386		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5387			break;
5388		msleep(500);
5389	}
5390
5391	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5392		return 0;
5393
5394	for (i = 0; i < 2; i++) {
5395		tx_ctrl = readl(base + NvRegTransmitterControl);
5396		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5397		writel(tx_ctrl, base + NvRegTransmitterControl);
5398
5399		/* verify that semaphore was acquired */
5400		tx_ctrl = readl(base + NvRegTransmitterControl);
5401		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5402		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5403			np->mgmt_sema = 1;
5404			return 1;
5405		} else
5406			udelay(50);
5407	}
5408
5409	return 0;
5410}
5411
5412static void nv_mgmt_release_sema(struct net_device *dev)
5413{
5414	struct fe_priv *np = netdev_priv(dev);
5415	u8 __iomem *base = get_hwbase(dev);
5416	u32 tx_ctrl;
5417
5418	if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5419		if (np->mgmt_sema) {
5420			tx_ctrl = readl(base + NvRegTransmitterControl);
5421			tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5422			writel(tx_ctrl, base + NvRegTransmitterControl);
5423		}
5424	}
5425}
5426
5427
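/* Ask the management unit firmware for its version: write the request,
 * toggle the DATA_START bit to signal it, then poll for up to 5 seconds
 * for the unit to toggle DATA_READY in reply.
 */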
5428static int nv_mgmt_get_version(struct net_device *dev)
5429{
5430	struct fe_priv *np = netdev_priv(dev);
5431	u8 __iomem *base = get_hwbase(dev);
5432	u32 data_ready = readl(base + NvRegTransmitterControl);
5433	u32 data_ready2 = 0;
5434	unsigned long start;
5435	int ready = 0;
5436
5437	writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5438	writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5439	start = jiffies;
5440	while (time_before(jiffies, start + 5*HZ)) {
5441		data_ready2 = readl(base + NvRegTransmitterControl);
5442		if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5443			ready = 1;
5444			break;
5445		}
5446		schedule_timeout_uninterruptible(1);
5447	}
5448
5449	if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5450		return 0;
5451
5452	np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5453
5454	return 1;
5455}
5456
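/* ndo_open: full hardware bring-up -- power up the phy, erase any stale
 * MAC configuration, set up the descriptor rings, program the speed,
 * deferral and interrupt registers, request the irq, then start rx/tx,
 * the tx queue and napi.
 */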
5457static int nv_open(struct net_device *dev)
5458{
5459	struct fe_priv *np = netdev_priv(dev);
5460	u8 __iomem *base = get_hwbase(dev);
5461	int ret = 1;
5462	int oom, i;
5463	u32 low;
5464
5465	/* power up phy */
5466	mii_rw(dev, np->phyaddr, MII_BMCR,
5467	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5468
5469	nv_txrx_gate(dev, false);
5470	/* erase previous misconfiguration */
5471	if (np->driver_data & DEV_HAS_POWER_CNTRL)
5472		nv_mac_reset(dev);
5473	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5474	writel(0, base + NvRegMulticastAddrB);
5475	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5476	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5477	writel(0, base + NvRegPacketFilterFlags);
5478
5479	writel(0, base + NvRegTransmitterControl);
5480	writel(0, base + NvRegReceiverControl);
5481
5482	writel(0, base + NvRegAdapterControl);
5483
5484	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5485		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
5486
5487	/* initialize descriptor rings */
5488	set_bufsize(dev);
5489	oom = nv_init_ring(dev);
5490
5491	writel(0, base + NvRegLinkSpeed);
5492	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5493	nv_txrx_reset(dev);
5494	writel(0, base + NvRegUnknownSetupReg6);
5495
5496	np->in_shutdown = 0;
5497
5498	/* give hw rings */
5499	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5500	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5501		base + NvRegRingSizes);
5502
5503	writel(np->linkspeed, base + NvRegLinkSpeed);
5504	if (np->desc_ver == DESC_VER_1)
5505		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5506	else
5507		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5508	writel(np->txrxctl_bits, base + NvRegTxRxControl);
5509	writel(np->vlanctl_bits, base + NvRegVlanControl);
5510	pci_push(base);
5511	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5512	if (reg_delay(dev, NvRegUnknownSetupReg5,
5513		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5514		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5515		netdev_info(dev,
5516			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
5517
5518	writel(0, base + NvRegMIIMask);
5519	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5520	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5521
5522	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5523	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5524	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5525	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5526
5527	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5528
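	/* seed the collision-backoff slot time with random low bits so
	 * several NICs on the same segment do not retry in lockstep */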
5529	get_random_bytes(&low, sizeof(low));
5530	low &= NVREG_SLOTTIME_MASK;
5531	if (np->desc_ver == DESC_VER_1) {
5532		writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5533	} else {
5534		if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5535			/* setup legacy backoff */
5536			writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5537		} else {
5538			writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5539			nv_gear_backoff_reseed(dev);
5540		}
5541	}
5542	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5543	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
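	/* the polling interval is programmed in units of 2^10/100 us
	 * (~10.24 us); see the poll_interval module parameter description
	 * near the end of this file */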
5544	if (poll_interval == -1) {
5545		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5546			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5547		else
5548			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5549	} else
5550		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5551	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5552	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5553			base + NvRegAdapterControl);
5554	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5555	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5556	if (np->wolenabled)
5557		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5558
5559	i = readl(base + NvRegPowerState);
5560	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5561		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5562
5563	pci_push(base);
5564	udelay(10);
5565	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5566
5567	nv_disable_hw_interrupts(dev, np->irqmask);
5568	pci_push(base);
5569	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5570	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5571	pci_push(base);
5572
5573	if (nv_request_irq(dev, 0))
5574		goto out_drain;
5575
5576	/* ask for interrupts */
5577	nv_enable_hw_interrupts(dev, np->irqmask);
5578
5579	spin_lock_irq(&np->lock);
5580	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5581	writel(0, base + NvRegMulticastAddrB);
5582	writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5583	writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5584	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5585	/* One manual link speed update: Interrupts are enabled, future link
5586	 * speed changes cause interrupts and are handled by nv_link_irq().
5587	 */
5588	readl(base + NvRegMIIStatus);
5589	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5590
5591	/* set linkspeed to an invalid value, forcing nv_update_linkspeed
5592	 * to reinitialize the hw */
5593	np->linkspeed = 0;
5594	ret = nv_update_linkspeed(dev);
5595	nv_start_rxtx(dev);
5596	netif_start_queue(dev);
5597	nv_napi_enable(dev);
5598
5599	if (ret) {
5600		netif_carrier_on(dev);
5601	} else {
5602		netdev_info(dev, "no link during initialization\n");
5603		netif_carrier_off(dev);
5604	}
5605	if (oom)
5606		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5607
5608	/* start statistics timer */
5609	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5610		mod_timer(&np->stats_poll,
5611			round_jiffies(jiffies + STATS_INTERVAL));
5612
5613	spin_unlock_irq(&np->lock);
5614
5615	/* If the loopback feature was set while the device was down, make sure
5616	 * that it's set correctly now.
5617	 */
5618	if (dev->features & NETIF_F_LOOPBACK)
5619		nv_set_loopback(dev, dev->features);
5620
5621	return 0;
5622out_drain:
5623	nv_drain_rxtx(dev);
5624	return ret;
5625}
5626
5627static int nv_close(struct net_device *dev)
5628{
5629	struct fe_priv *np = netdev_priv(dev);
5630	u8 __iomem *base;
5631
5632	spin_lock_irq(&np->lock);
5633	np->in_shutdown = 1;
5634	spin_unlock_irq(&np->lock);
5635	nv_napi_disable(dev);
5636	synchronize_irq(np->pci_dev->irq);
5637
5638	del_timer_sync(&np->oom_kick);
5639	del_timer_sync(&np->nic_poll);
5640	del_timer_sync(&np->stats_poll);
5641
5642	netif_stop_queue(dev);
5643	spin_lock_irq(&np->lock);
5644	nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */
5645	nv_stop_rxtx(dev);
5646	nv_txrx_reset(dev);
5647
5648	/* disable interrupts on the nic or we will lock up */
5649	base = get_hwbase(dev);
5650	nv_disable_hw_interrupts(dev, np->irqmask);
5651	pci_push(base);
5652
5653	spin_unlock_irq(&np->lock);
5654
5655	nv_free_irq(dev);
5656
5657	nv_drain_rxtx(dev);
5658
5659	if (np->wolenabled || !phy_power_down) {
5660		nv_txrx_gate(dev, false);
5661		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5662		nv_start_rx(dev);
5663	} else {
5664		/* power down phy */
5665		mii_rw(dev, np->phyaddr, MII_BMCR,
5666		       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5667		nv_txrx_gate(dev, true);
5668	}
5669
5670	/* FIXME: power down nic */
5671
5672	return 0;
5673}
5674
5675static const struct net_device_ops nv_netdev_ops = {
5676	.ndo_open		= nv_open,
5677	.ndo_stop		= nv_close,
5678	.ndo_get_stats64	= nv_get_stats64,
5679	.ndo_start_xmit		= nv_start_xmit,
5680	.ndo_tx_timeout		= nv_tx_timeout,
5681	.ndo_change_mtu		= nv_change_mtu,
5682	.ndo_fix_features	= nv_fix_features,
5683	.ndo_set_features	= nv_set_features,
5684	.ndo_validate_addr	= eth_validate_addr,
5685	.ndo_set_mac_address	= nv_set_mac_address,
5686	.ndo_set_rx_mode	= nv_set_multicast,
5687#ifdef CONFIG_NET_POLL_CONTROLLER
5688	.ndo_poll_controller	= nv_poll_controller,
5689#endif
5690};
5691
5692static const struct net_device_ops nv_netdev_ops_optimized = {
5693	.ndo_open		= nv_open,
5694	.ndo_stop		= nv_close,
5695	.ndo_get_stats64	= nv_get_stats64,
5696	.ndo_start_xmit		= nv_start_xmit_optimized,
5697	.ndo_tx_timeout		= nv_tx_timeout,
5698	.ndo_change_mtu		= nv_change_mtu,
5699	.ndo_fix_features	= nv_fix_features,
5700	.ndo_set_features	= nv_set_features,
5701	.ndo_validate_addr	= eth_validate_addr,
5702	.ndo_set_mac_address	= nv_set_mac_address,
5703	.ndo_set_rx_mode	= nv_set_multicast,
5704#ifdef CONFIG_NET_POLL_CONTROLLER
5705	.ndo_poll_controller	= nv_poll_controller,
5706#endif
5707};
5708
5709static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5710{
5711	struct net_device *dev;
5712	struct fe_priv *np;
5713	unsigned long addr;
5714	u8 __iomem *base;
5715	int err, i;
5716	u32 powerstate, txreg;
5717	u32 phystate_orig = 0, phystate;
5718	int phyinitialized = 0;
5719	static int printed_version;
5720	u8 mac[ETH_ALEN];
5721
5722	if (!printed_version++)
5723		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5724			FORCEDETH_VERSION);
5725
5726	dev = alloc_etherdev(sizeof(struct fe_priv));
5727	err = -ENOMEM;
5728	if (!dev)
5729		goto out;
5730
5731	np = netdev_priv(dev);
5732	np->dev = dev;
5733	np->pci_dev = pci_dev;
5734	spin_lock_init(&np->lock);
5735	spin_lock_init(&np->hwstats_lock);
5736	SET_NETDEV_DEV(dev, &pci_dev->dev);
5737	u64_stats_init(&np->swstats_rx_syncp);
5738	u64_stats_init(&np->swstats_tx_syncp);
5739	np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
5740	if (!np->txrx_stats) {
5741		pr_err("np->txrx_stats: failed to allocate per-cpu stats\n");
5742		err = -ENOMEM;
5743		goto out_alloc_percpu;
5744	}
5745
5746	timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5747	timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
5748	timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE);
5749
5750	err = pci_enable_device(pci_dev);
5751	if (err)
5752		goto out_free;
5753
5754	pci_set_master(pci_dev);
5755
5756	err = pci_request_regions(pci_dev, DRV_NAME);
5757	if (err < 0)
5758		goto out_disable;
5759
5760	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5761		np->register_size = NV_PCI_REGSZ_VER3;
5762	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5763		np->register_size = NV_PCI_REGSZ_VER2;
5764	else
5765		np->register_size = NV_PCI_REGSZ_VER1;
5766
5767	err = -EINVAL;
5768	addr = 0;
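	/* scan the PCI BARs for the first memory resource large enough
	 * to hold the register window for this chip revision */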
5769	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5770		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5771				pci_resource_len(pci_dev, i) >= np->register_size) {
5772			addr = pci_resource_start(pci_dev, i);
5773			break;
5774		}
5775	}
5776	if (i == DEVICE_COUNT_RESOURCE) {
5777		dev_info(&pci_dev->dev, "Couldn't find register window\n");
5778		goto out_relreg;
5779	}
5780
5781	/* copy of driver data */
5782	np->driver_data = id->driver_data;
5783	/* copy of device id */
5784	np->device_id = id->device;
5785
5786	/* handle different descriptor versions */
5787	if (id->driver_data & DEV_HAS_HIGH_DMA) {
5788		/* packet format 3: supports 40-bit addressing */
5789		np->desc_ver = DESC_VER_3;
5790		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5791		if (dma_64bit) {
5792			if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39)))
5793				dev_info(&pci_dev->dev,
5794					 "64-bit DMA failed, using 32-bit addressing\n");
5795			else
5796				dev->features |= NETIF_F_HIGHDMA;
5797		}
5798	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
5799		/* packet format 2: supports jumbo frames */
5800		np->desc_ver = DESC_VER_2;
5801		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5802	} else {
5803		/* original packet format */
5804		np->desc_ver = DESC_VER_1;
5805		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5806	}
5807
5808	np->pkt_limit = NV_PKTLIMIT_1;
5809	if (id->driver_data & DEV_HAS_LARGEDESC)
5810		np->pkt_limit = NV_PKTLIMIT_2;
5811
5812	if (id->driver_data & DEV_HAS_CHECKSUM) {
5813		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5814		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5815			NETIF_F_TSO | NETIF_F_RXCSUM;
5816	}
5817
5818	np->vlanctl_bits = 0;
5819	if (id->driver_data & DEV_HAS_VLAN) {
5820		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5821		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
5822				    NETIF_F_HW_VLAN_CTAG_TX;
5823	}
5824
5825	dev->features |= dev->hw_features;
5826
5827	/* Add loopback capability to the device. */
5828	dev->hw_features |= NETIF_F_LOOPBACK;
5829
5830	/* MTU range: 64 - 1500 or 9100 */
5831	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
5832	dev->max_mtu = np->pkt_limit;
5833
5834	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5835	if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5836	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5837	    (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5838		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5839	}
5840
5841	err = -ENOMEM;
5842	np->base = ioremap(addr, np->register_size);
5843	if (!np->base)
5844		goto out_relreg;
5845
5846	np->rx_ring_size = RX_RING_DEFAULT;
5847	np->tx_ring_size = TX_RING_DEFAULT;
5848
5849	if (!nv_optimized(np)) {
5850		np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev,
5851						      sizeof(struct ring_desc) *
5852						      (np->rx_ring_size +
5853						      np->tx_ring_size),
5854						      &np->ring_addr,
5855						      GFP_KERNEL);
5856		if (!np->rx_ring.orig)
5857			goto out_unmap;
5858		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5859	} else {
5860		np->rx_ring.ex = dma_alloc_coherent(&pci_dev->dev,
5861						    sizeof(struct ring_desc_ex) *
5862						    (np->rx_ring_size +
5863						    np->tx_ring_size),
5864						    &np->ring_addr, GFP_KERNEL);
5865		if (!np->rx_ring.ex)
5866			goto out_unmap;
5867		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5868	}
5869	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5870	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5871	if (!np->rx_skb || !np->tx_skb)
5872		goto out_freering;
5873
5874	if (!nv_optimized(np))
5875		dev->netdev_ops = &nv_netdev_ops;
5876	else
5877		dev->netdev_ops = &nv_netdev_ops_optimized;
5878
5879	netif_napi_add(dev, &np->napi, nv_napi_poll);
5880	dev->ethtool_ops = &ops;
5881	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5882
5883	pci_set_drvdata(pci_dev, dev);
5884
5885	/* read the mac address */
5886	base = get_hwbase(dev);
5887	np->orig_mac[0] = readl(base + NvRegMacAddrA);
5888	np->orig_mac[1] = readl(base + NvRegMacAddrB);
5889
5890	/* check the workaround bit for correct mac address order */
5891	txreg = readl(base + NvRegTransmitPoll);
5892	if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5893		/* mac address is already in correct order */
5894		mac[0] = (np->orig_mac[0] >>  0) & 0xff;
5895		mac[1] = (np->orig_mac[0] >>  8) & 0xff;
5896		mac[2] = (np->orig_mac[0] >> 16) & 0xff;
5897		mac[3] = (np->orig_mac[0] >> 24) & 0xff;
5898		mac[4] = (np->orig_mac[1] >>  0) & 0xff;
5899		mac[5] = (np->orig_mac[1] >>  8) & 0xff;
5900	} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5901		/* mac address is already in correct order */
5902		mac[0] = (np->orig_mac[0] >>  0) & 0xff;
5903		mac[1] = (np->orig_mac[0] >>  8) & 0xff;
5904		mac[2] = (np->orig_mac[0] >> 16) & 0xff;
5905		mac[3] = (np->orig_mac[0] >> 24) & 0xff;
5906		mac[4] = (np->orig_mac[1] >>  0) & 0xff;
5907		mac[5] = (np->orig_mac[1] >>  8) & 0xff;
5908		/*
5909		 * Set orig mac address back to the reversed version.
5910		 * This flag will be cleared during low power transition.
5911		 * Therefore, we should always put back the reversed address.
5912		 */
5913		np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
5914			(mac[3] << 16) + (mac[2] << 24);
5915		np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
5916	} else {
5917		/* need to reverse mac address to correct order */
5918		mac[0] = (np->orig_mac[1] >>  8) & 0xff;
5919		mac[1] = (np->orig_mac[1] >>  0) & 0xff;
5920		mac[2] = (np->orig_mac[0] >> 24) & 0xff;
5921		mac[3] = (np->orig_mac[0] >> 16) & 0xff;
5922		mac[4] = (np->orig_mac[0] >>  8) & 0xff;
5923		mac[5] = (np->orig_mac[0] >>  0) & 0xff;
5924		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5925		dev_dbg(&pci_dev->dev,
5926			"%s: set workaround bit for reversed mac addr\n",
5927			__func__);
5928	}
5929
5930	if (is_valid_ether_addr(mac)) {
5931		eth_hw_addr_set(dev, mac);
5932	} else {
5933		/*
5934		 * Bad mac address. At least one bios sets the mac address
5935		 * to 01:23:45:67:89:ab
5936		 */
5937		dev_err(&pci_dev->dev,
5938			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5939			mac);
5940		eth_hw_addr_random(dev);
5941		dev_err(&pci_dev->dev,
5942			"Using random MAC address: %pM\n", dev->dev_addr);
5943	}
5944
5945	/* set mac address */
5946	nv_copy_mac_to_hw(dev);
5947
5948	/* disable WOL */
5949	writel(0, base + NvRegWakeUpFlags);
5950	np->wolenabled = 0;
5951	device_set_wakeup_enable(&pci_dev->dev, false);
5952
5953	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5954
5955		/* take phy and nic out of low power mode */
5956		powerstate = readl(base + NvRegPowerState2);
5957		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5958		if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5959		    pci_dev->revision >= 0xA3)
5960			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5961		writel(powerstate, base + NvRegPowerState2);
5962	}
5963
5964	if (np->desc_ver == DESC_VER_1)
5965		np->tx_flags = NV_TX_VALID;
5966	else
5967		np->tx_flags = NV_TX2_VALID;
5968
5969	np->msi_flags = 0;
5970	if ((id->driver_data & DEV_HAS_MSI) && msi)
5971		np->msi_flags |= NV_MSI_CAPABLE;
5972
5973	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5974		/* MSI-X has had reported issues when the irqmask is
5975		 * modified, as in the case of napi; keep it disabled
5976		 * for now. */
5977#if 0
5978		np->msi_flags |= NV_MSI_X_CAPABLE;
5979#endif
5980	}
5981
5982	if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5983		np->irqmask = NVREG_IRQMASK_CPU;
5984		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5985			np->msi_flags |= 0x0001;
5986	} else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5987		   !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5988		/* start off in throughput mode */
5989		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5990		/* remove support for msix mode */
5991		np->msi_flags &= ~NV_MSI_X_CAPABLE;
5992	} else {
5993		optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5994		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5995		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5996			np->msi_flags |= 0x0003;
5997	}
5998
5999	if (id->driver_data & DEV_NEED_TIMERIRQ)
6000		np->irqmask |= NVREG_IRQ_TIMER;
6001	if (id->driver_data & DEV_NEED_LINKTIMER) {
6002		np->need_linktimer = 1;
6003		np->link_timeout = jiffies + LINK_TIMEOUT;
6004	} else {
6005		np->need_linktimer = 0;
6006	}
6007
6008	/* Limit the number of outstanding tx descriptors to work around a hw bug */
6009	if (id->driver_data & DEV_NEED_TX_LIMIT) {
6010		np->tx_limit = 1;
6011		if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
6012		    pci_dev->revision >= 0xA2)
6013			np->tx_limit = 0;
6014	}
6015
6016	/* clear phy state and temporarily halt phy interrupts */
6017	writel(0, base + NvRegMIIMask);
6018	phystate = readl(base + NvRegAdapterControl);
6019	if (phystate & NVREG_ADAPTCTL_RUNNING) {
6020		phystate_orig = 1;
6021		phystate &= ~NVREG_ADAPTCTL_RUNNING;
6022		writel(phystate, base + NvRegAdapterControl);
6023	}
6024	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
6025
6026	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
6027		/* management unit running on the mac? */
6028		if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
6029		    (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
6030		    nv_mgmt_acquire_sema(dev) &&
6031		    nv_mgmt_get_version(dev)) {
6032			np->mac_in_use = 1;
6033			if (np->mgmt_version > 0)
6034				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
6035			/* has the management unit already set up the phy? */
6036			if (np->mac_in_use &&
6037			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
6038			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
6039				/* phy was initialized by the mgmt unit */
6040				phyinitialized = 1;
6041			} else {
6042				/* we need to init the phy */
6043			}
6044		}
6045	}
6046
6047	/* find a suitable phy */
6048	for (i = 1; i <= 32; i++) {
6049		int id1, id2;
6050		int phyaddr = i & 0x1F;
6051
6052		spin_lock_irq(&np->lock);
6053		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
6054		spin_unlock_irq(&np->lock);
6055		if (id1 < 0 || id1 == 0xffff)
6056			continue;
6057		spin_lock_irq(&np->lock);
6058		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
6059		spin_unlock_irq(&np->lock);
6060		if (id2 < 0 || id2 == 0xffff)
6061			continue;
6062
6063		np->phy_model = id2 & PHYID2_MODEL_MASK;
6064		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
6065		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
6066		np->phyaddr = phyaddr;
6067		np->phy_oui = id1 | id2;
6068
6069		/* Realtek hardcoded phy id1 to all zeros on certain phys */
6070		if (np->phy_oui == PHY_OUI_REALTEK2)
6071			np->phy_oui = PHY_OUI_REALTEK;
6072		/* Setup phy revision for Realtek */
6073		if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
6074			np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
6075
6076		break;
6077	}
6078	if (i == 33) {
6079		dev_info(&pci_dev->dev, "probe: Could not find a valid PHY\n");
6080		goto out_error;
6081	}
6082
6083	if (!phyinitialized) {
6084		/* reset it */
6085		phy_init(dev);
6086	} else {
6087		/* see if it is a gigabit phy */
6088		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
6089		if (mii_status & PHY_GIGABIT)
6090			np->gigabit = PHY_GIGABIT;
6091	}
6092
6093	/* set default link speed settings */
6094	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
6095	np->duplex = 0;
6096	np->autoneg = 1;
6097
6098	err = register_netdev(dev);
6099	if (err) {
6100		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
6101		goto out_error;
6102	}
6103
6104	netif_carrier_off(dev);
6105
6106	/* Some NICs freeze when TX pause is enabled while NIC is
6107	 * down, and this stays across warm reboots. The sequence
6108	 * below should be enough to recover from that state.
6109	 */
6110	nv_update_pause(dev, 0);
6111	nv_start_tx(dev);
6112	nv_stop_tx(dev);
6113
6114	if (id->driver_data & DEV_HAS_VLAN)
6115		nv_vlan_mode(dev, dev->features);
6116
6117	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
6118		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
6119
6120	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
6121		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
6122		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
6123			"csum " : "",
6124		 dev->features & (NETIF_F_HW_VLAN_CTAG_RX |
6125				  NETIF_F_HW_VLAN_CTAG_TX) ?
6126			"vlan " : "",
6127		 dev->features & (NETIF_F_LOOPBACK) ?
6128			"loopback " : "",
6129		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
6130		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
6131		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
6132		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
6133		 np->need_linktimer ? "lnktim " : "",
6134		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
6135		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
6136		 np->desc_ver);
6137
6138	return 0;
6139
6140out_error:
6141	nv_mgmt_release_sema(dev);
6142	if (phystate_orig)
6143		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
6144out_freering:
6145	free_rings(dev);
6146out_unmap:
6147	iounmap(get_hwbase(dev));
6148out_relreg:
6149	pci_release_regions(pci_dev);
6150out_disable:
6151	pci_disable_device(pci_dev);
6152out_free:
6153	free_percpu(np->txrx_stats);
6154out_alloc_percpu:
6155	free_netdev(dev);
6156out:
6157	return err;
6158}
6159
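/* Undo the Realtek 8201 crossover-detection override that is applied when
 * phy_cross detection is disabled, and restart autonegotiation, so the phy
 * is handed back in a sane state on driver unload.
 */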
6160static void nv_restore_phy(struct net_device *dev)
6161{
6162	struct fe_priv *np = netdev_priv(dev);
6163	u16 phy_reserved, mii_control;
6164
6165	if (np->phy_oui == PHY_OUI_REALTEK &&
6166	    np->phy_model == PHY_MODEL_REALTEK_8201 &&
6167	    phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
6168		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
6169		phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
6170		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
6171		phy_reserved |= PHY_REALTEK_INIT8;
6172		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
6173		mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
6174
6175		/* restart auto negotiation */
6176		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
6177		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
6178		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
6179	}
6180}
6181
6182static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6183{
6184	struct net_device *dev = pci_get_drvdata(pci_dev);
6185	struct fe_priv *np = netdev_priv(dev);
6186	u8 __iomem *base = get_hwbase(dev);
6187
6188	/* special op: write back the misordered MAC address - otherwise
6189	 * the next nv_probe would see a wrong address.
6190	 */
6191	writel(np->orig_mac[0], base + NvRegMacAddrA);
6192	writel(np->orig_mac[1], base + NvRegMacAddrB);
6193	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
6194	       base + NvRegTransmitPoll);
6195}
6196
6197static void nv_remove(struct pci_dev *pci_dev)
6198{
6199	struct net_device *dev = pci_get_drvdata(pci_dev);
6200	struct fe_priv *np = netdev_priv(dev);
6201
6202	free_percpu(np->txrx_stats);
6203
6204	unregister_netdev(dev);
6205
6206	nv_restore_mac_addr(pci_dev);
6207
6208	/* restore any phy related changes */
6209	nv_restore_phy(dev);
6210
6211	nv_mgmt_release_sema(dev);
6212
6213	/* free all structures */
6214	free_rings(dev);
6215	iounmap(get_hwbase(dev));
6216	pci_release_regions(pci_dev);
6217	pci_disable_device(pci_dev);
6218	free_netdev(dev);
6219}
6220
6221#ifdef CONFIG_PM_SLEEP
6222static int nv_suspend(struct device *device)
6223{
6224	struct net_device *dev = dev_get_drvdata(device);
6225	struct fe_priv *np = netdev_priv(dev);
6226	u8 __iomem *base = get_hwbase(dev);
6227	int i;
6228
6229	if (netif_running(dev)) {
6230		/* Gross. */
6231		nv_close(dev);
6232	}
6233	netif_device_detach(dev);
6234
6235	/* save non-pci configuration space */
6236	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6237		np->saved_config_space[i] = readl(base + i*sizeof(u32));
6238
6239	return 0;
6240}
6241
6242static int nv_resume(struct device *device)
6243{
6244	struct pci_dev *pdev = to_pci_dev(device);
6245	struct net_device *dev = pci_get_drvdata(pdev);
6246	struct fe_priv *np = netdev_priv(dev);
6247	u8 __iomem *base = get_hwbase(dev);
6248	int i, rc = 0;
6249
6250	/* restore non-pci configuration space */
6251	for (i = 0; i <= np->register_size/sizeof(u32); i++)
6252		writel(np->saved_config_space[i], base+i*sizeof(u32));
6253
6254	if (np->driver_data & DEV_NEED_MSI_FIX)
6255		pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
6256
6257	/* restore phy state, including autoneg */
6258	phy_init(dev);
6259
6260	netif_device_attach(dev);
6261	if (netif_running(dev)) {
6262		rc = nv_open(dev);
6263		nv_set_multicast(dev);
6264	}
6265	return rc;
6266}
6267
6268static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
6269#define NV_PM_OPS (&nv_pm_ops)
6270
6271#else
6272#define NV_PM_OPS NULL
6273#endif /* CONFIG_PM_SLEEP */
6274
6275#ifdef CONFIG_PM
6276static void nv_shutdown(struct pci_dev *pdev)
6277{
6278	struct net_device *dev = pci_get_drvdata(pdev);
6279	struct fe_priv *np = netdev_priv(dev);
6280
6281	if (netif_running(dev))
6282		nv_close(dev);
6283
6284	/*
6285	 * Restore the MAC so a kernel started by kexec won't get confused.
6286	 * If we really go for poweroff, we must not restore the MAC,
6287	 * otherwise the MAC for WOL will be reversed at least on some boards.
6288	 */
6289	if (system_state != SYSTEM_POWER_OFF)
6290		nv_restore_mac_addr(pdev);
6291
6292	pci_disable_device(pdev);
6293	/*
6294	 * Apparently it is not possible to reinitialise from D3 hot,
6295	 * only put the device into D3 if we really go for poweroff.
6296	 */
6297	if (system_state == SYSTEM_POWER_OFF) {
6298		pci_wake_from_d3(pdev, np->wolenabled);
6299		pci_set_power_state(pdev, PCI_D3hot);
6300	}
6301}
6302#else
6303#define nv_shutdown NULL
6304#endif /* CONFIG_PM */
6305
6306static const struct pci_device_id pci_tbl[] = {
6307	{	/* nForce Ethernet Controller */
6308		PCI_DEVICE(0x10DE, 0x01C3),
6309		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6310	},
6311	{	/* nForce2 Ethernet Controller */
6312		PCI_DEVICE(0x10DE, 0x0066),
6313		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6314	},
6315	{	/* nForce3 Ethernet Controller */
6316		PCI_DEVICE(0x10DE, 0x00D6),
6317		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
6318	},
6319	{	/* nForce3 Ethernet Controller */
6320		PCI_DEVICE(0x10DE, 0x0086),
6321		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6322	},
6323	{	/* nForce3 Ethernet Controller */
6324		PCI_DEVICE(0x10DE, 0x008C),
6325		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6326	},
6327	{	/* nForce3 Ethernet Controller */
6328		PCI_DEVICE(0x10DE, 0x00E6),
6329		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6330	},
6331	{	/* nForce3 Ethernet Controller */
6332		PCI_DEVICE(0x10DE, 0x00DF),
6333		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
6334	},
6335	{	/* CK804 Ethernet Controller */
6336		PCI_DEVICE(0x10DE, 0x0056),
6337		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6338	},
6339	{	/* CK804 Ethernet Controller */
6340		PCI_DEVICE(0x10DE, 0x0057),
6341		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6342	},
6343	{	/* MCP04 Ethernet Controller */
6344		PCI_DEVICE(0x10DE, 0x0037),
6345		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6346	},
6347	{	/* MCP04 Ethernet Controller */
6348		PCI_DEVICE(0x10DE, 0x0038),
6349		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
6350	},
6351	{	/* MCP51 Ethernet Controller */
6352		PCI_DEVICE(0x10DE, 0x0268),
6353		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6354	},
6355	{	/* MCP51 Ethernet Controller */
6356		PCI_DEVICE(0x10DE, 0x0269),
6357		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
6358	},
6359	{	/* MCP55 Ethernet Controller */
6360		PCI_DEVICE(0x10DE, 0x0372),
6361		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6362	},
6363	{	/* MCP55 Ethernet Controller */
6364		PCI_DEVICE(0x10DE, 0x0373),
6365		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6366	},
6367	{	/* MCP61 Ethernet Controller */
6368		PCI_DEVICE(0x10DE, 0x03E5),
6369		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6370	},
6371	{	/* MCP61 Ethernet Controller */
6372		PCI_DEVICE(0x10DE, 0x03E6),
6373		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6374	},
6375	{	/* MCP61 Ethernet Controller */
6376		PCI_DEVICE(0x10DE, 0x03EE),
6377		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6378	},
6379	{	/* MCP61 Ethernet Controller */
6380		PCI_DEVICE(0x10DE, 0x03EF),
6381		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6382	},
6383	{	/* MCP65 Ethernet Controller */
6384		PCI_DEVICE(0x10DE, 0x0450),
6385		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6386	},
6387	{	/* MCP65 Ethernet Controller */
6388		PCI_DEVICE(0x10DE, 0x0451),
6389		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6390	},
6391	{	/* MCP65 Ethernet Controller */
6392		PCI_DEVICE(0x10DE, 0x0452),
6393		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6394	},
6395	{	/* MCP65 Ethernet Controller */
6396		PCI_DEVICE(0x10DE, 0x0453),
6397		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6398	},
6399	{	/* MCP67 Ethernet Controller */
6400		PCI_DEVICE(0x10DE, 0x054C),
6401		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6402	},
6403	{	/* MCP67 Ethernet Controller */
6404		PCI_DEVICE(0x10DE, 0x054D),
6405		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6406	},
6407	{	/* MCP67 Ethernet Controller */
6408		PCI_DEVICE(0x10DE, 0x054E),
6409		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6410	},
6411	{	/* MCP67 Ethernet Controller */
6412		PCI_DEVICE(0x10DE, 0x054F),
6413		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6414	},
6415	{	/* MCP73 Ethernet Controller */
6416		PCI_DEVICE(0x10DE, 0x07DC),
6417		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6418	},
6419	{	/* MCP73 Ethernet Controller */
6420		PCI_DEVICE(0x10DE, 0x07DD),
6421		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6422	},
6423	{	/* MCP73 Ethernet Controller */
6424		PCI_DEVICE(0x10DE, 0x07DE),
6425		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6426	},
6427	{	/* MCP73 Ethernet Controller */
6428		PCI_DEVICE(0x10DE, 0x07DF),
6429		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6430	},
6431	{	/* MCP77 Ethernet Controller */
6432		PCI_DEVICE(0x10DE, 0x0760),
6433		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6434	},
6435	{	/* MCP77 Ethernet Controller */
6436		PCI_DEVICE(0x10DE, 0x0761),
6437		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6438	},
6439	{	/* MCP77 Ethernet Controller */
6440		PCI_DEVICE(0x10DE, 0x0762),
6441		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6442	},
6443	{	/* MCP77 Ethernet Controller */
6444		PCI_DEVICE(0x10DE, 0x0763),
6445		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6446	},
6447	{	/* MCP79 Ethernet Controller */
6448		PCI_DEVICE(0x10DE, 0x0AB0),
6449		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6450	},
6451	{	/* MCP79 Ethernet Controller */
6452		PCI_DEVICE(0x10DE, 0x0AB1),
6453		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6454	},
6455	{	/* MCP79 Ethernet Controller */
6456		PCI_DEVICE(0x10DE, 0x0AB2),
6457		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6458	},
6459	{	/* MCP79 Ethernet Controller */
6460		PCI_DEVICE(0x10DE, 0x0AB3),
6461		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6462	},
6463	{	/* MCP89 Ethernet Controller */
6464		PCI_DEVICE(0x10DE, 0x0D7D),
6465		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6466	},
6467	{0,},
6468};
6469
6470static struct pci_driver forcedeth_pci_driver = {
6471	.name		= DRV_NAME,
6472	.id_table	= pci_tbl,
6473	.probe		= nv_probe,
6474	.remove		= nv_remove,
6475	.shutdown	= nv_shutdown,
6476	.driver.pm	= NV_PM_OPS,
6477};
6478
6479module_param(max_interrupt_work, int, 0);
6480MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6481module_param(optimization_mode, int, 0);
6482MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6483module_param(poll_interval, int, 0);
6484	MODULE_PARM_DESC(poll_interval, "The interval determines how frequently the timer interrupt is generated: value = (time_in_micro_secs * 100) / (2^10). Min is 0 and max is 65535.");
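/* Worked example (illustrative values, not defaults verified here):
 * poll_interval = 970 programs 970 * 2^10 / 100 us ~= 9.9 ms, i.e. about
 * 100 timer interrupts per second; loading with, hypothetically,
 *   modprobe forcedeth optimization_mode=1 poll_interval=98
 * would give a ~1 ms timer interval instead.
 */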
6485module_param(msi, int, 0);
6486MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6487module_param(msix, int, 0);
6488MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
6489module_param(dma_64bit, int, 0);
6490MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6491module_param(phy_cross, int, 0);
6492MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6493module_param(phy_power_down, int, 0);
6494MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
6495module_param(debug_tx_timeout, bool, 0);
6496MODULE_PARM_DESC(debug_tx_timeout,
6497		 "Dump tx related registers and ring when tx_timeout happens");
6498
6499module_pci_driver(forcedeth_pci_driver);
6500MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6501MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6502MODULE_LICENSE("GPL");
6503MODULE_DEVICE_TABLE(pci, pci_tbl);