   1/*******************************************************************************
   2
   3  Intel PRO/100 Linux driver
   4  Copyright(c) 1999 - 2006 Intel Corporation.
   5
   6  This program is free software; you can redistribute it and/or modify it
   7  under the terms and conditions of the GNU General Public License,
   8  version 2, as published by the Free Software Foundation.
   9
  10  This program is distributed in the hope it will be useful, but WITHOUT
  11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13  more details.
  14
  15  You should have received a copy of the GNU General Public License along with
  16  this program; if not, write to the Free Software Foundation, Inc.,
  17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18
  19  The full GNU General Public License is included in this distribution in
  20  the file called "COPYING".
  21
  22  Contact Information:
  23  Linux NICS <linux.nics@intel.com>
  24  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  26
  27*******************************************************************************/
  28
  29/*
  30 *	e100.c: Intel(R) PRO/100 ethernet driver
  31 *
  32 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
  33 *	original e100 driver, but better described as a munging of
  34 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
  35 *
  36 *	References:
  37 *		Intel 8255x 10/100 Mbps Ethernet Controller Family,
  38 *		Open Source Software Developers Manual,
  39 *		http://sourceforge.net/projects/e1000
  40 *
  41 *
  42 *	                      Theory of Operation
  43 *
  44 *	I.   General
  45 *
   46 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
  47 *	controller family, which includes the 82557, 82558, 82559, 82550,
  48 *	82551, and 82562 devices.  82558 and greater controllers
  49 *	integrate the Intel 82555 PHY.  The controllers are used in
  50 *	server and client network interface cards, as well as in
  51 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
  52 *	configurations.  8255x supports a 32-bit linear addressing
   53 *	mode and operates at a 33 MHz PCI clock rate.
  54 *
  55 *	II.  Driver Operation
  56 *
  57 *	Memory-mapped mode is used exclusively to access the device's
  58 *	shared-memory structure, the Control/Status Registers (CSR). All
  59 *	setup, configuration, and control of the device, including queuing
   60 *	of Tx, Rx, and configuration commands, is done through the CSR.
  61 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
  62 *	protects the shared Command Block List (CBL).
  63 *
   64 *	8255x is highly MII-compliant and all accesses to the PHY go
  65 *	through the Management Data Interface (MDI).  Consequently, the
  66 *	driver leverages the mii.c library shared with other MII-compliant
  67 *	devices.
  68 *
  69 *	Big- and Little-Endian byte order as well as 32- and 64-bit
  70 *	archs are supported.  Weak-ordered memory and non-cache-coherent
  71 *	archs are supported.
  72 *
  73 *	III. Transmit
  74 *
  75 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
  76 *	together in a fixed-size ring (CBL) thus forming the flexible mode
  77 *	memory structure.  A TCB marked with the suspend-bit indicates
  78 *	the end of the ring.  The last TCB processed suspends the
   79 *	controller, and the controller can be restarted by issuing a CU
  80 *	resume command to continue from the suspend point, or a CU start
  81 *	command to start at a given position in the ring.
  82 *
  83 *	Non-Tx commands (config, multicast setup, etc) are linked
  84 *	into the CBL ring along with Tx commands.  The common structure
  85 *	used for both Tx and non-Tx commands is the Command Block (CB).
  86 *
  87 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
  88 *	is the next CB to check for completion; cb_to_send is the first
  89 *	CB to start on in case of a previous failure to resume.  CB clean
  90 *	up happens in interrupt context in response to a CU interrupt.
  91 *	cbs_avail keeps track of number of free CB resources available.
  92 *
  93 * 	Hardware padding of short packets to minimum packet size is
  94 * 	enabled.  82557 pads with 7Eh, while the later controllers pad
  95 * 	with 00h.
  96 *
  97 *	IV.  Receive
  98 *
  99 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 100 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 101 *	memory structure.  Rx skbs are allocated to contain both the RFD
 102 *	and the data buffer, but the RFD is pulled off before the skb is
 103 *	indicated.  The data buffer is aligned such that encapsulated
 104 *	protocol headers are u32-aligned.  Since the RFD is part of the
 105 *	mapped shared memory, and completion status is contained within
 106 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 107 *	view from software and hardware.
 108 *
 109 *	In order to keep updates to the RFD link field from colliding with
 110 *	hardware writes to mark packets complete, we use the feature that
 111 *	hardware will not write to a size 0 descriptor and mark the previous
 112 *	packet as end-of-list (EL).   After updating the link, we remove EL
 113 *	and only then restore the size such that hardware may use the
 114 *	previous-to-end RFD.
 115 *
  116 *	Under typical operation, the receive unit (RU) is started once,
 117 *	and the controller happily fills RFDs as frames arrive.  If
 118 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 119 *	the RU must be restarted.  Frame arrival generates an interrupt,
 120 *	and Rx indication and re-allocation happen in the same context,
  121 *	therefore no locking is required.  A software interrupt is
  122 *	generated from the watchdog to recover from a failed allocation
 123 *	scenario where all Rx resources have been indicated and none re-
 124 *	placed.
 125 *
 126 *	V.   Miscellaneous
 127 *
 128 * 	VLAN offloading of tagging, stripping and filtering is not
  129 * 	supported, but the driver will accommodate the extra 4-byte VLAN tag
 130 * 	for processing by upper layers.  Tx/Rx Checksum offloading is not
  131 * 	supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 132 * 	not supported (hardware limitation).
 133 *
 134 * 	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 135 *
 136 * 	Thanks to JC (jchapman@katalix.com) for helping with
 137 * 	testing/troubleshooting the development driver.
 138 *
 139 * 	TODO:
 140 * 	o several entry points race with dev->close
 141 * 	o check for tx-no-resources/stop Q races with tx clean/wake Q
 142 *
 143 *	FIXES:
 144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 145 *	- Stratus87247: protect MDI control register manipulations
 146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 147 *      - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 148 */
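
/*
 * Illustrative sketch, added for exposition (not part of the driver):
 * the suspend/resume handoff from section III above, reduced to the two
 * ordered writes that e100_exec_cb() performs below.  The toy_* names
 * are hypothetical; cb_s is the real suspend bit defined further down.
 */
#if 0
struct toy_cb {
	u16 command;			/* cb_s marks the suspend point */
	struct toy_cb *prev;
};

static void toy_queue_cb(struct toy_cb *cb)
{
	/* Set the S-bit on the new tail first, then clear it on the
	 * previous tail; the reverse order could let the CU run past
	 * an unfinished command before it suspends. */
	cb->command |= cb_s;
	wmb();
	cb->prev->command &= ~cb_s;
}
#endif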
 149
 150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 151
 152#include <linux/hardirq.h>
 153#include <linux/interrupt.h>
 154#include <linux/module.h>
 155#include <linux/moduleparam.h>
 156#include <linux/kernel.h>
 157#include <linux/types.h>
 158#include <linux/sched.h>
 159#include <linux/slab.h>
 160#include <linux/delay.h>
 161#include <linux/init.h>
 162#include <linux/pci.h>
 163#include <linux/dma-mapping.h>
 164#include <linux/dmapool.h>
 165#include <linux/netdevice.h>
 166#include <linux/etherdevice.h>
 167#include <linux/mii.h>
 168#include <linux/if_vlan.h>
 169#include <linux/skbuff.h>
 170#include <linux/ethtool.h>
 171#include <linux/string.h>
 172#include <linux/firmware.h>
 173#include <linux/rtnetlink.h>
 174#include <asm/unaligned.h>
 175
 176
 177#define DRV_NAME		"e100"
 178#define DRV_EXT			"-NAPI"
 179#define DRV_VERSION		"3.5.24-k2"DRV_EXT
 180#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
 181#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
 182
 183#define E100_WATCHDOG_PERIOD	(2 * HZ)
 184#define E100_NAPI_WEIGHT	16
 185
 186#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
 187#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
 188#define FIRMWARE_D102E		"e100/d102e_ucode.bin"
 189
 190MODULE_DESCRIPTION(DRV_DESCRIPTION);
 191MODULE_AUTHOR(DRV_COPYRIGHT);
 192MODULE_LICENSE("GPL");
 193MODULE_VERSION(DRV_VERSION);
 194MODULE_FIRMWARE(FIRMWARE_D101M);
 195MODULE_FIRMWARE(FIRMWARE_D101S);
 196MODULE_FIRMWARE(FIRMWARE_D102E);
 197
 198static int debug = 3;
 199static int eeprom_bad_csum_allow = 0;
 200static int use_io = 0;
 201module_param(debug, int, 0);
 202module_param(eeprom_bad_csum_allow, int, 0);
 203module_param(use_io, int, 0);
 204MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 205MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
 206MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
 207
 208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
 209	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
 210	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
 211static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
 212	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
 213	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
 214	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
 215	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
 216	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
 217	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
 218	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
 219	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
 220	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
 221	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
 222	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
 223	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
 224	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
 225	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
 226	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
 227	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
 228	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
 229	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
 230	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
 231	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
 232	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
 233	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
 234	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
 235	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
 236	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
 237	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
 238	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
 239	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
 240	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
 241	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
 242	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
 243	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
 244	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
 245	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
 246	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
 247	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
 248	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
 249	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
 250	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
 251	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
 252	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
 253	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
 254	{ 0, }
 255};
 256MODULE_DEVICE_TABLE(pci, e100_id_table);
 257
 258enum mac {
 259	mac_82557_D100_A  = 0,
 260	mac_82557_D100_B  = 1,
 261	mac_82557_D100_C  = 2,
 262	mac_82558_D101_A4 = 4,
 263	mac_82558_D101_B0 = 5,
 264	mac_82559_D101M   = 8,
 265	mac_82559_D101S   = 9,
 266	mac_82550_D102    = 12,
 267	mac_82550_D102_C  = 13,
 268	mac_82551_E       = 14,
 269	mac_82551_F       = 15,
 270	mac_82551_10      = 16,
 271	mac_unknown       = 0xFF,
 272};
 273
 274enum phy {
 275	phy_100a     = 0x000003E0,
 276	phy_100c     = 0x035002A8,
 277	phy_82555_tx = 0x015002A8,
 278	phy_nsc_tx   = 0x5C002000,
 279	phy_82562_et = 0x033002A8,
 280	phy_82562_em = 0x032002A8,
 281	phy_82562_ek = 0x031002A8,
 282	phy_82562_eh = 0x017002A8,
 283	phy_82552_v  = 0xd061004d,
 284	phy_unknown  = 0xFFFFFFFF,
 285};
 286
 287/* CSR (Control/Status Registers) */
 288struct csr {
 289	struct {
 290		u8 status;
 291		u8 stat_ack;
 292		u8 cmd_lo;
 293		u8 cmd_hi;
 294		u32 gen_ptr;
 295	} scb;
 296	u32 port;
 297	u16 flash_ctrl;
 298	u8 eeprom_ctrl_lo;
 299	u8 eeprom_ctrl_hi;
 300	u32 mdi_ctrl;
 301	u32 rx_dma_count;
 302};
 303
 304enum scb_status {
 305	rus_no_res       = 0x08,
 306	rus_ready        = 0x10,
 307	rus_mask         = 0x3C,
 308};
 309
 310enum ru_state  {
 311	RU_SUSPENDED = 0,
 312	RU_RUNNING	 = 1,
 313	RU_UNINITIALIZED = -1,
 314};
 315
 316enum scb_stat_ack {
 317	stat_ack_not_ours    = 0x00,
 318	stat_ack_sw_gen      = 0x04,
 319	stat_ack_rnr         = 0x10,
 320	stat_ack_cu_idle     = 0x20,
 321	stat_ack_frame_rx    = 0x40,
 322	stat_ack_cu_cmd_done = 0x80,
 323	stat_ack_not_present = 0xFF,
 324	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
 325	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
 326};
 327
 328enum scb_cmd_hi {
 329	irq_mask_none = 0x00,
 330	irq_mask_all  = 0x01,
 331	irq_sw_gen    = 0x02,
 332};
 333
 334enum scb_cmd_lo {
 335	cuc_nop        = 0x00,
 336	ruc_start      = 0x01,
 337	ruc_load_base  = 0x06,
 338	cuc_start      = 0x10,
 339	cuc_resume     = 0x20,
 340	cuc_dump_addr  = 0x40,
 341	cuc_dump_stats = 0x50,
 342	cuc_load_base  = 0x60,
 343	cuc_dump_reset = 0x70,
 344};
 345
 346enum cuc_dump {
 347	cuc_dump_complete       = 0x0000A005,
 348	cuc_dump_reset_complete = 0x0000A007,
 349};
 350
 351enum port {
 352	software_reset  = 0x0000,
 353	selftest        = 0x0001,
 354	selective_reset = 0x0002,
 355};
 356
 357enum eeprom_ctrl_lo {
 358	eesk = 0x01,
 359	eecs = 0x02,
 360	eedi = 0x04,
 361	eedo = 0x08,
 362};
 363
 364enum mdi_ctrl {
 365	mdi_write = 0x04000000,
 366	mdi_read  = 0x08000000,
 367	mdi_ready = 0x10000000,
 368};
 369
 370enum eeprom_op {
 371	op_write = 0x05,
 372	op_read  = 0x06,
 373	op_ewds  = 0x10,
 374	op_ewen  = 0x13,
 375};
 376
 377enum eeprom_offsets {
 378	eeprom_cnfg_mdix  = 0x03,
 379	eeprom_phy_iface  = 0x06,
 380	eeprom_id         = 0x0A,
 381	eeprom_config_asf = 0x0D,
 382	eeprom_smbus_addr = 0x90,
 383};
 384
 385enum eeprom_cnfg_mdix {
 386	eeprom_mdix_enabled = 0x0080,
 387};
 388
 389enum eeprom_phy_iface {
 390	NoSuchPhy = 0,
 391	I82553AB,
 392	I82553C,
 393	I82503,
 394	DP83840,
 395	S80C240,
 396	S80C24,
 397	I82555,
 398	DP83840A = 10,
 399};
 400
 401enum eeprom_id {
 402	eeprom_id_wol = 0x0020,
 403};
 404
 405enum eeprom_config_asf {
 406	eeprom_asf = 0x8000,
 407	eeprom_gcl = 0x4000,
 408};
 409
 410enum cb_status {
 411	cb_complete = 0x8000,
 412	cb_ok       = 0x2000,
 413};
 414
 415enum cb_command {
 416	cb_nop    = 0x0000,
 417	cb_iaaddr = 0x0001,
 418	cb_config = 0x0002,
 419	cb_multi  = 0x0003,
 420	cb_tx     = 0x0004,
 421	cb_ucode  = 0x0005,
 422	cb_dump   = 0x0006,
 423	cb_tx_sf  = 0x0008,
 424	cb_cid    = 0x1f00,
 425	cb_i      = 0x2000,
 426	cb_s      = 0x4000,
 427	cb_el     = 0x8000,
 428};
 429
 430struct rfd {
 431	__le16 status;
 432	__le16 command;
 433	__le32 link;
 434	__le32 rbd;
 435	__le16 actual_size;
 436	__le16 size;
 437};
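
/*
 * Illustrative sketch, added for exposition (not part of the driver):
 * the RFD link/EL update described in "IV. Receive" above.  The tail
 * RFD waits in the ring with EL set and size 0, so hardware neither
 * uses it nor writes past it while software links in a replacement.
 * toy_rfd_extend is hypothetical; cb_el is the EL bit defined below.
 */
#if 0
static void toy_rfd_extend(struct rfd *tail, dma_addr_t new_rfd, u16 size)
{
	tail->link = cpu_to_le32(new_rfd);	/* safe: a size-0 + EL RFD */
	wmb();					/* is ignored by hardware  */
	tail->command &= cpu_to_le16(~cb_el);	/* drop end-of-list...     */
	wmb();
	tail->size = cpu_to_le16(size);		/* ...then open it for use */
}
#endif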
 438
 439struct rx {
 440	struct rx *next, *prev;
 441	struct sk_buff *skb;
 442	dma_addr_t dma_addr;
 443};
 444
 445#if defined(__BIG_ENDIAN_BITFIELD)
 446#define X(a,b)	b,a
 447#else
 448#define X(a,b)	a,b
 449#endif
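
/*
 * For illustration only (hypothetical struct, not used by the driver):
 * what X() yields for byte 0 of struct config below, so each named
 * bitfield lands in the physical bits the hardware expects regardless
 * of the host's bitfield allocation order.
 */
#if 0
struct example_config_byte0 {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 pad0:2, byte_count:6;	/* X(byte_count:6, pad0:2) swapped */
#else
	u8 byte_count:6, pad0:2;	/* X(byte_count:6, pad0:2) as-is   */
#endif
};
#endif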
 450struct config {
 451/*0*/	u8 X(byte_count:6, pad0:2);
 452/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
 453/*2*/	u8 adaptive_ifs;
 454/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
 455	   term_write_cache_line:1), pad3:4);
 456/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
 457/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
 458/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
 459	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
 460	   rx_discard_overruns:1), rx_save_bad_frames:1);
 461/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
 462	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
 463	   tx_dynamic_tbd:1);
 464/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
 465/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
 466	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
 467/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
 468	   loopback:2);
 469/*11*/	u8 X(linear_priority:3, pad11:5);
 470/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
 471/*13*/	u8 ip_addr_lo;
 472/*14*/	u8 ip_addr_hi;
 473/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
 474	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
 475	   pad15_2:1), crs_or_cdt:1);
 476/*16*/	u8 fc_delay_lo;
 477/*17*/	u8 fc_delay_hi;
 478/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
 479	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
 480/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
 481	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
 482	   full_duplex_force:1), full_duplex_pin:1);
 483/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
 484/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
 485/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
 486	u8 pad_d102[9];
 487};
 488
 489#define E100_MAX_MULTICAST_ADDRS	64
 490struct multi {
 491	__le16 count;
 492	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
 493};
 494
 495/* Important: keep total struct u32-aligned */
 496#define UCODE_SIZE			134
 497struct cb {
 498	__le16 status;
 499	__le16 command;
 500	__le32 link;
 501	union {
 502		u8 iaaddr[ETH_ALEN];
 503		__le32 ucode[UCODE_SIZE];
 504		struct config config;
 505		struct multi multi;
 506		struct {
 507			u32 tbd_array;
 508			u16 tcb_byte_count;
 509			u8 threshold;
 510			u8 tbd_count;
 511			struct {
 512				__le32 buf_addr;
 513				__le16 size;
 514				u16 eol;
 515			} tbd;
 516		} tcb;
 517		__le32 dump_buffer_addr;
 518	} u;
 519	struct cb *next, *prev;
 520	dma_addr_t dma_addr;
 521	struct sk_buff *skb;
 522};
 523
 524enum loopback {
 525	lb_none = 0, lb_mac = 1, lb_phy = 3,
 526};
 527
 528struct stats {
 529	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
 530		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
 531		tx_multiple_collisions, tx_total_collisions;
 532	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
 533		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
 534		rx_short_frame_errors;
 535	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
 536	__le16 xmt_tco_frames, rcv_tco_frames;
 537	__le32 complete;
 538};
 539
 540struct mem {
 541	struct {
 542		u32 signature;
 543		u32 result;
 544	} selftest;
 545	struct stats stats;
 546	u8 dump_buf[596];
 547};
 548
 549struct param_range {
 550	u32 min;
 551	u32 max;
 552	u32 count;
 553};
 554
 555struct params {
 556	struct param_range rfds;
 557	struct param_range cbs;
 558};
 559
 560struct nic {
 561	/* Begin: frequently used values: keep adjacent for cache effect */
 562	u32 msg_enable				____cacheline_aligned;
 563	struct net_device *netdev;
 564	struct pci_dev *pdev;
 565	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);
 566
 567	struct rx *rxs				____cacheline_aligned;
 568	struct rx *rx_to_use;
 569	struct rx *rx_to_clean;
 570	struct rfd blank_rfd;
 571	enum ru_state ru_running;
 572
 573	spinlock_t cb_lock			____cacheline_aligned;
 574	spinlock_t cmd_lock;
 575	struct csr __iomem *csr;
 576	enum scb_cmd_lo cuc_cmd;
 577	unsigned int cbs_avail;
 578	struct napi_struct napi;
 579	struct cb *cbs;
 580	struct cb *cb_to_use;
 581	struct cb *cb_to_send;
 582	struct cb *cb_to_clean;
 583	__le16 tx_command;
 584	/* End: frequently used values: keep adjacent for cache effect */
 585
 586	enum {
 587		ich                = (1 << 0),
 588		promiscuous        = (1 << 1),
 589		multicast_all      = (1 << 2),
 590		wol_magic          = (1 << 3),
 591		ich_10h_workaround = (1 << 4),
 592	} flags					____cacheline_aligned;
 593
 594	enum mac mac;
 595	enum phy phy;
 596	struct params params;
 597	struct timer_list watchdog;
 598	struct mii_if_info mii;
 599	struct work_struct tx_timeout_task;
 600	enum loopback loopback;
 601
 602	struct mem *mem;
 603	dma_addr_t dma_addr;
 604
 605	struct pci_pool *cbs_pool;
 606	dma_addr_t cbs_dma_addr;
 607	u8 adaptive_ifs;
 608	u8 tx_threshold;
 609	u32 tx_frames;
 610	u32 tx_collisions;
 611	u32 tx_deferred;
 612	u32 tx_single_collisions;
 613	u32 tx_multiple_collisions;
 614	u32 tx_fc_pause;
 615	u32 tx_tco_frames;
 616
 617	u32 rx_fc_pause;
 618	u32 rx_fc_unsupported;
 619	u32 rx_tco_frames;
 620	u32 rx_over_length_errors;
 621
 622	u16 eeprom_wc;
 623	__le16 eeprom[256];
 624	spinlock_t mdio_lock;
 625	const struct firmware *fw;
 626};
 627
 628static inline void e100_write_flush(struct nic *nic)
 629{
 630	/* Flush previous PCI writes through intermediate bridges
 631	 * by doing a benign read */
 632	(void)ioread8(&nic->csr->scb.status);
 633}
 634
 635static void e100_enable_irq(struct nic *nic)
 636{
 637	unsigned long flags;
 638
 639	spin_lock_irqsave(&nic->cmd_lock, flags);
 640	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
 641	e100_write_flush(nic);
 642	spin_unlock_irqrestore(&nic->cmd_lock, flags);
 643}
 644
 645static void e100_disable_irq(struct nic *nic)
 646{
 647	unsigned long flags;
 648
 649	spin_lock_irqsave(&nic->cmd_lock, flags);
 650	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
 651	e100_write_flush(nic);
 652	spin_unlock_irqrestore(&nic->cmd_lock, flags);
 653}
 654
 655static void e100_hw_reset(struct nic *nic)
 656{
 657	/* Put CU and RU into idle with a selective reset to get
 658	 * device off of PCI bus */
 659	iowrite32(selective_reset, &nic->csr->port);
 660	e100_write_flush(nic); udelay(20);
 661
 662	/* Now fully reset device */
 663	iowrite32(software_reset, &nic->csr->port);
 664	e100_write_flush(nic); udelay(20);
 665
 666	/* Mask off our interrupt line - it's unmasked after reset */
 667	e100_disable_irq(nic);
 668}
 669
 670static int e100_self_test(struct nic *nic)
 671{
 672	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);
 673
 674	/* Passing the self-test is a pretty good indication
 675	 * that the device can DMA to/from host memory */
 676
 677	nic->mem->selftest.signature = 0;
 678	nic->mem->selftest.result = 0xFFFFFFFF;
 679
 680	iowrite32(selftest | dma_addr, &nic->csr->port);
 681	e100_write_flush(nic);
 682	/* Wait 10 msec for self-test to complete */
 683	msleep(10);
 684
 685	/* Interrupts are enabled after self-test */
 686	e100_disable_irq(nic);
 687
 688	/* Check results of self-test */
 689	if (nic->mem->selftest.result != 0) {
 690		netif_err(nic, hw, nic->netdev,
 691			  "Self-test failed: result=0x%08X\n",
 692			  nic->mem->selftest.result);
 693		return -ETIMEDOUT;
 694	}
 695	if (nic->mem->selftest.signature == 0) {
 696		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
 697		return -ETIMEDOUT;
 698	}
 699
 700	return 0;
 701}
 702
 703static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
 704{
 705	u32 cmd_addr_data[3];
 706	u8 ctrl;
 707	int i, j;
 708
 709	/* Three cmds: write/erase enable, write data, write/erase disable */
 710	cmd_addr_data[0] = op_ewen << (addr_len - 2);
 711	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
 712		le16_to_cpu(data);
 713	cmd_addr_data[2] = op_ewds << (addr_len - 2);
 714
 715	/* Bit-bang cmds to write word to eeprom */
 716	for (j = 0; j < 3; j++) {
 717
 718		/* Chip select */
 719		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
 720		e100_write_flush(nic); udelay(4);
 721
 722		for (i = 31; i >= 0; i--) {
 723			ctrl = (cmd_addr_data[j] & (1 << i)) ?
 724				eecs | eedi : eecs;
 725			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
 726			e100_write_flush(nic); udelay(4);
 727
 728			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
 729			e100_write_flush(nic); udelay(4);
 730		}
 731		/* Wait 10 msec for cmd to complete */
 732		msleep(10);
 733
 734		/* Chip deselect */
 735		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
 736		e100_write_flush(nic); udelay(4);
 737	}
 738};
 739
 740/* General technique stolen from the eepro100 driver - very clever */
 741static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
 742{
 743	u32 cmd_addr_data;
 744	u16 data = 0;
 745	u8 ctrl;
 746	int i;
 747
 748	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
 749
 750	/* Chip select */
 751	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
 752	e100_write_flush(nic); udelay(4);
 753
 754	/* Bit-bang to read word from eeprom */
 755	for (i = 31; i >= 0; i--) {
 756		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
 757		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
 758		e100_write_flush(nic); udelay(4);
 759
 760		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
 761		e100_write_flush(nic); udelay(4);
 762
  763		/* The EEPROM drives a dummy zero to EEDO after receiving the
 764		 * complete address.  Use this to adjust addr_len. */
 765		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
 766		if (!(ctrl & eedo) && i > 16) {
 767			*addr_len -= (i - 16);
 768			i = 17;
 769		}
 770
 771		data = (data << 1) | (ctrl & eedo ? 1 : 0);
 772	}
 773
 774	/* Chip deselect */
 775	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
 776	e100_write_flush(nic); udelay(4);
 777
 778	return cpu_to_le16(data);
 779};
 780
 781/* Load entire EEPROM image into driver cache and validate checksum */
 782static int e100_eeprom_load(struct nic *nic)
 783{
 784	u16 addr, addr_len = 8, checksum = 0;
 785
 786	/* Try reading with an 8-bit addr len to discover actual addr len */
 787	e100_eeprom_read(nic, &addr_len, 0);
 788	nic->eeprom_wc = 1 << addr_len;
 789
 790	for (addr = 0; addr < nic->eeprom_wc; addr++) {
 791		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
 792		if (addr < nic->eeprom_wc - 1)
 793			checksum += le16_to_cpu(nic->eeprom[addr]);
 794	}
 795
 796	/* The checksum, stored in the last word, is calculated such that
 797	 * the sum of words should be 0xBABA */
 798	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
 799		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
 800		if (!eeprom_bad_csum_allow)
 801			return -EAGAIN;
 802	}
 803
 804	return 0;
 805}
 806
 807/* Save (portion of) driver EEPROM cache to device and update checksum */
 808static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 809{
 810	u16 addr, addr_len = 8, checksum = 0;
 811
 812	/* Try reading with an 8-bit addr len to discover actual addr len */
 813	e100_eeprom_read(nic, &addr_len, 0);
 814	nic->eeprom_wc = 1 << addr_len;
 815
 816	if (start + count >= nic->eeprom_wc)
 817		return -EINVAL;
 818
 819	for (addr = start; addr < start + count; addr++)
 820		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
 821
 822	/* The checksum, stored in the last word, is calculated such that
 823	 * the sum of words should be 0xBABA */
 824	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
 825		checksum += le16_to_cpu(nic->eeprom[addr]);
 826	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
 827	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
 828		nic->eeprom[nic->eeprom_wc - 1]);
 829
 830	return 0;
 831}
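
/*
 * Sketch, added for exposition (not driver code): the checksum rule
 * shared by e100_eeprom_load() and e100_eeprom_save() above -- the
 * 16-bit sum of all EEPROM words, checksum word included, must equal
 * 0xBABA.  toy_eeprom_csum_word is hypothetical.
 */
#if 0
static u16 toy_eeprom_csum_word(const __le16 *eeprom, u16 wc)
{
	u16 sum = 0;
	u16 i;

	for (i = 0; i < wc - 1; i++)
		sum += le16_to_cpu(eeprom[i]);
	return 0xBABA - sum;	/* value to store (LE) in word wc - 1 */
}
#endif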
 832
 833#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 834#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
 835static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 836{
 837	unsigned long flags;
 838	unsigned int i;
 839	int err = 0;
 840
 841	spin_lock_irqsave(&nic->cmd_lock, flags);
 842
 843	/* Previous command is accepted when SCB clears */
 844	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
 845		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
 846			break;
 847		cpu_relax();
 848		if (unlikely(i > E100_WAIT_SCB_FAST))
 849			udelay(5);
 850	}
 851	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
 852		err = -EAGAIN;
 853		goto err_unlock;
 854	}
 855
 856	if (unlikely(cmd != cuc_resume))
 857		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
 858	iowrite8(cmd, &nic->csr->scb.cmd_lo);
 859
 860err_unlock:
 861	spin_unlock_irqrestore(&nic->cmd_lock, flags);
 862
 863	return err;
 864}
 865
 866static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 867	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
 868{
 869	struct cb *cb;
 870	unsigned long flags;
 871	int err = 0;
 872
 873	spin_lock_irqsave(&nic->cb_lock, flags);
 874
 875	if (unlikely(!nic->cbs_avail)) {
 876		err = -ENOMEM;
 877		goto err_unlock;
 878	}
 879
 880	cb = nic->cb_to_use;
 881	nic->cb_to_use = cb->next;
 882	nic->cbs_avail--;
 883	cb->skb = skb;
 884
 885	if (unlikely(!nic->cbs_avail))
 886		err = -ENOSPC;
 887
 888	cb_prepare(nic, cb, skb);
 889
  890	/* Order is important, otherwise we'll be in a race with h/w:
 891	 * set S-bit in current first, then clear S-bit in previous. */
 892	cb->command |= cpu_to_le16(cb_s);
 893	wmb();
 894	cb->prev->command &= cpu_to_le16(~cb_s);
 895
 896	while (nic->cb_to_send != nic->cb_to_use) {
 897		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
 898			nic->cb_to_send->dma_addr))) {
 899			/* Ok, here's where things get sticky.  It's
 900			 * possible that we can't schedule the command
 901			 * because the controller is too busy, so
 902			 * let's just queue the command and try again
 903			 * when another command is scheduled. */
 904			if (err == -ENOSPC) {
  905				/* request a reset */
 906				schedule_work(&nic->tx_timeout_task);
 907			}
 908			break;
 909		} else {
 910			nic->cuc_cmd = cuc_resume;
 911			nic->cb_to_send = nic->cb_to_send->next;
 912		}
 913	}
 914
 915err_unlock:
 916	spin_unlock_irqrestore(&nic->cb_lock, flags);
 917
 918	return err;
 919}
 920
 921static int mdio_read(struct net_device *netdev, int addr, int reg)
 922{
 923	struct nic *nic = netdev_priv(netdev);
 924	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
 925}
 926
 927static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 928{
 929	struct nic *nic = netdev_priv(netdev);
 930
 931	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
 932}
 933
 934/* the standard mdio_ctrl() function for usual MII-compliant hardware */
 935static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 936{
 937	u32 data_out = 0;
 938	unsigned int i;
 939	unsigned long flags;
 940
 941
 942	/*
 943	 * Stratus87247: we shouldn't be writing the MDI control
 944	 * register until the Ready bit shows True.  Also, since
 945	 * manipulation of the MDI control registers is a multi-step
 946	 * procedure it should be done under lock.
 947	 */
 948	spin_lock_irqsave(&nic->mdio_lock, flags);
 949	for (i = 100; i; --i) {
 950		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
 951			break;
 952		udelay(20);
 953	}
 954	if (unlikely(!i)) {
 955		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
 956		spin_unlock_irqrestore(&nic->mdio_lock, flags);
 957		return 0;		/* No way to indicate timeout error */
 958	}
 959	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
 960
 961	for (i = 0; i < 100; i++) {
 962		udelay(20);
 963		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
 964			break;
 965	}
 966	spin_unlock_irqrestore(&nic->mdio_lock, flags);
 967	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
 968		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
 969		     dir == mdi_read ? "READ" : "WRITE",
 970		     addr, reg, data, data_out);
 971	return (u16)data_out;
 972}
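
/*
 * For reference, a sketch of the MDI control word that mdio_ctrl_hw()
 * assembles above (bit positions read off the iowrite32 and the
 * mdi_ctrl enum): data in 15:0, PHY register in 20:16, PHY address in
 * 25:21, opcode (mdi_write/mdi_read) in bits 26/27, ready at bit 28.
 * toy_mdi_word is hypothetical.
 */
#if 0
static u32 toy_mdi_word(u32 addr, u32 dir, u32 reg, u16 data)
{
	return (reg << 16) | (addr << 21) | dir | data;
}
#endif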
 973
 974/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
 975static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
 976				 u32 addr,
 977				 u32 dir,
 978				 u32 reg,
 979				 u16 data)
 980{
 981	if ((reg == MII_BMCR) && (dir == mdi_write)) {
 982		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
 983			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
 984							MII_ADVERTISE);
 985
 986			/*
 987			 * Workaround Si issue where sometimes the part will not
 988			 * autoneg to 100Mbps even when advertised.
 989			 */
 990			if (advert & ADVERTISE_100FULL)
 991				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
 992			else if (advert & ADVERTISE_100HALF)
 993				data |= BMCR_SPEED100;
 994		}
 995	}
 996	return mdio_ctrl_hw(nic, addr, dir, reg, data);
 997}
 998
 999/* Fully software-emulated mdio_ctrl() function for cards without
1000 * MII-compliant PHYs.
1001 * For now, this is mainly geared towards 80c24 support; in case of further
1002 * requirements for other types (i82503, ...?) either extend this mechanism
1003 * or split it, whichever is cleaner.
1004 */
1005static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1006				      u32 addr,
1007				      u32 dir,
1008				      u32 reg,
1009				      u16 data)
1010{
1011	/* might need to allocate a netdev_priv'ed register array eventually
1012	 * to be able to record state changes, but for now
1013	 * some fully hardcoded register handling ought to be ok I guess. */
1014
1015	if (dir == mdi_read) {
1016		switch (reg) {
1017		case MII_BMCR:
1018			/* Auto-negotiation, right? */
1019			return  BMCR_ANENABLE |
1020				BMCR_FULLDPLX;
1021		case MII_BMSR:
1022			return	BMSR_LSTATUS /* for mii_link_ok() */ |
1023				BMSR_ANEGCAPABLE |
1024				BMSR_10FULL;
1025		case MII_ADVERTISE:
1026			/* 80c24 is a "combo card" PHY, right? */
1027			return	ADVERTISE_10HALF |
1028				ADVERTISE_10FULL;
1029		default:
1030			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1031				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1032				     dir == mdi_read ? "READ" : "WRITE",
1033				     addr, reg, data);
1034			return 0xFFFF;
1035		}
1036	} else {
1037		switch (reg) {
1038		default:
1039			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041				     dir == mdi_read ? "READ" : "WRITE",
1042				     addr, reg, data);
1043			return 0xFFFF;
1044		}
1045	}
1046}
1047static inline int e100_phy_supports_mii(struct nic *nic)
1048{
 1049	/* For now, just check whether we are
 1050	   using MII software emulation.
 1051	*/
1052	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1053}
1054
1055static void e100_get_defaults(struct nic *nic)
1056{
1057	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
1058	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
1059
1060	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
1061	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
1062	if (nic->mac == mac_unknown)
1063		nic->mac = mac_82557_D100_A;
1064
1065	nic->params.rfds = rfds;
1066	nic->params.cbs = cbs;
1067
1068	/* Quadwords to DMA into FIFO before starting frame transmit */
1069	nic->tx_threshold = 0xE0;
1070
1071	/* no interrupt for every tx completion, delay = 256us if not 557 */
1072	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
1073		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
1074
1075	/* Template for a freshly allocated RFD */
1076	nic->blank_rfd.command = 0;
1077	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
1078	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
1079
1080	/* MII setup */
1081	nic->mii.phy_id_mask = 0x1F;
1082	nic->mii.reg_num_mask = 0x1F;
1083	nic->mii.dev = nic->netdev;
1084	nic->mii.mdio_read = mdio_read;
1085	nic->mii.mdio_write = mdio_write;
1086}
1087
1088static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1089{
1090	struct config *config = &cb->u.config;
1091	u8 *c = (u8 *)config;
1092
1093	cb->command = cpu_to_le16(cb_config);
1094
1095	memset(config, 0, sizeof(struct config));
1096
1097	config->byte_count = 0x16;		/* bytes in this struct */
1098	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
1099	config->direct_rx_dma = 0x1;		/* reserved */
1100	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
1101	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
1102	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
1103	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
1104	if (e100_phy_supports_mii(nic))
1105		config->mii_mode = 1;           /* 1=MII mode, 0=i82503 mode */
1106	config->pad10 = 0x6;
1107	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
1108	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
1109	config->ifs = 0x6;			/* x16 = inter frame spacing */
1110	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
1111	config->pad15_1 = 0x1;
1112	config->pad15_2 = 0x1;
1113	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
1114	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
1115	config->tx_padding = 0x1;		/* 1=pad short frames */
1116	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
1117	config->pad18 = 0x1;
1118	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
1119	config->pad20_1 = 0x1F;
1120	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
1121	config->pad21_1 = 0x5;
1122
1123	config->adaptive_ifs = nic->adaptive_ifs;
1124	config->loopback = nic->loopback;
1125
1126	if (nic->mii.force_media && nic->mii.full_duplex)
1127		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */
1128
1129	if (nic->flags & promiscuous || nic->loopback) {
1130		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
1131		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
1132		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
1133	}
1134
1135	if (nic->flags & multicast_all)
1136		config->multicast_all = 0x1;		/* 1=accept, 0=no */
1137
1138	/* disable WoL when up */
1139	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1140		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */
1141
1142	if (nic->mac >= mac_82558_D101_A4) {
1143		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
1144		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
1145		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
1146		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
1147		if (nic->mac >= mac_82559_D101M) {
1148			config->tno_intr = 0x1;		/* TCO stats enable */
1149			/* Enable TCO in extended config */
1150			if (nic->mac >= mac_82551_10) {
1151				config->byte_count = 0x20; /* extended bytes */
1152				config->rx_d102_mode = 0x1; /* GMRC for TCO */
1153			}
1154		} else {
1155			config->standard_stat_counter = 0x0;
1156		}
1157	}
1158
1159	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1160		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1161		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1162	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1163		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1164		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1165	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1166		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1167		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1168}
1169
1170/*************************************************************************
1171*  CPUSaver parameters
1172*
1173*  All CPUSaver parameters are 16-bit literals that are part of a
1174*  "move immediate value" instruction.  By changing the value of
1175*  the literal in the instruction before the code is loaded, the
1176*  driver can change the algorithm.
1177*
1178*  INTDELAY - This loads the dead-man timer with its initial value.
1179*    When this timer expires the interrupt is asserted, and the
1180*    timer is reset each time a new packet is received.  (see
1181*    BUNDLEMAX below to set the limit on number of chained packets)
1182*    The current default is 0x600 or 1536.  Experiments show that
 1183*    the value should probably stay within the 0x200 - 0x1000 range.
1184*
1185*  BUNDLEMAX -
1186*    This sets the maximum number of frames that will be bundled.  In
1187*    some situations, such as the TCP windowing algorithm, it may be
1188*    better to limit the growth of the bundle size than let it go as
1189*    high as it can, because that could cause too much added latency.
1190*    The default is six, because this is the number of packets in the
1191*    default TCP window size.  A value of 1 would make CPUSaver indicate
1192*    an interrupt for every frame received.  If you do not want to put
 1193*    a limit on the bundle size, set this value to 0xFFFF.
1194*
1195*  BUNDLESMALL -
1196*    This contains a bit-mask describing the minimum size frame that
1197*    will be bundled.  The default masks the lower 7 bits, which means
1198*    that any frame less than 128 bytes in length will not be bundled,
1199*    but will instead immediately generate an interrupt.  This does
1200*    not affect the current bundle in any way.  Any frame that is 128
 1201*    bytes or larger will be bundled normally.  This feature is meant
1202*    to provide immediate indication of ACK frames in a TCP environment.
1203*    Customers were seeing poor performance when a machine with CPUSaver
1204*    enabled was sending but not receiving.  The delay introduced when
1205*    the ACKs were received was enough to reduce total throughput, because
1206*    the sender would sit idle until the ACK was finally seen.
1207*
1208*    The current default is 0xFF80, which masks out the lower 7 bits.
 1209*    This means that any frame which is 0x7F (127) bytes or smaller
1210*    will cause an immediate interrupt.  Because this value must be a
1211*    bit mask, there are only a few valid values that can be used.  To
1212*    turn this feature off, the driver can write the value xFFFF to the
1213*    lower word of this instruction (in the same way that the other
1214*    parameters are used).  Likewise, a value of 0xF800 (2047) would
1215*    cause an interrupt to be generated for every frame, because all
1216*    standard Ethernet frames are <= 2047 bytes in length.
1217*************************************************************************/
1218
1219/* if you wish to disable the ucode functionality, while maintaining the
1220 * workarounds it provides, set the following defines to:
1221 * BUNDLESMALL 0
1222 * BUNDLEMAX 1
1223 * INTDELAY 1
1224 */
1225#define BUNDLESMALL 1
1226#define BUNDLEMAX (u16)6
1227#define INTDELAY (u16)1536 /* 0x600 */
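
/*
 * Sketch, added for exposition (not driver code): how a BUNDLESMALL
 * mask is applied.  With the default 0xFF80 mask, a received frame
 * bypasses bundling (immediate interrupt) when no bit of its length
 * survives the mask, i.e. when it is 0x7F (127) bytes or smaller.
 */
#if 0
static int toy_frame_bypasses_bundling(u16 len, u16 mask)
{
	return (len & mask) == 0;	/* e.g. len = 127, mask = 0xFF80 -> 1 */
}
#endif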
1228
1229/* Initialize firmware */
1230static const struct firmware *e100_request_firmware(struct nic *nic)
1231{
1232	const char *fw_name;
1233	const struct firmware *fw = nic->fw;
1234	u8 timer, bundle, min_size;
1235	int err = 0;
1236
1237	/* do not load u-code for ICH devices */
1238	if (nic->flags & ich)
1239		return NULL;
1240
1241	/* Search for ucode match against h/w revision */
1242	if (nic->mac == mac_82559_D101M)
1243		fw_name = FIRMWARE_D101M;
1244	else if (nic->mac == mac_82559_D101S)
1245		fw_name = FIRMWARE_D101S;
1246	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
1247		fw_name = FIRMWARE_D102E;
1248	else /* No ucode on other devices */
1249		return NULL;
1250
1251	/* If the firmware has not previously been loaded, request a pointer
1252	 * to it. If it was previously loaded, we are reinitializing the
1253	 * adapter, possibly in a resume from hibernate, in which case
1254	 * request_firmware() cannot be used.
1255	 */
1256	if (!fw)
1257		err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1258
1259	if (err) {
1260		netif_err(nic, probe, nic->netdev,
1261			  "Failed to load firmware \"%s\": %d\n",
1262			  fw_name, err);
1263		return ERR_PTR(err);
1264	}
1265
1266	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1267	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1268	if (fw->size != UCODE_SIZE * 4 + 3) {
1269		netif_err(nic, probe, nic->netdev,
1270			  "Firmware \"%s\" has wrong size %zu\n",
1271			  fw_name, fw->size);
1272		release_firmware(fw);
1273		return ERR_PTR(-EINVAL);
1274	}
1275
1276	/* Read timer, bundle and min_size from end of firmware blob */
1277	timer = fw->data[UCODE_SIZE * 4];
1278	bundle = fw->data[UCODE_SIZE * 4 + 1];
1279	min_size = fw->data[UCODE_SIZE * 4 + 2];
1280
1281	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1282	    min_size >= UCODE_SIZE) {
1283		netif_err(nic, probe, nic->netdev,
1284			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1285			  fw_name, timer, bundle, min_size);
1286		release_firmware(fw);
1287		return ERR_PTR(-EINVAL);
1288	}
1289
1290	/* OK, firmware is validated and ready to use. Save a pointer
1291	 * to it in the nic */
1292	nic->fw = fw;
1293	return fw;
1294}
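
/*
 * For reference (summarized from the validation above, not from a
 * separate spec): the ucode blob is UCODE_SIZE * 4 bytes of
 * little-endian instruction words, followed by three offset bytes
 * giving the ucode word indices that receive the INTDELAY, BUNDLEMAX
 * and BUNDLESMALL literals patched in e100_setup_ucode() below.
 */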
1295
1296static void e100_setup_ucode(struct nic *nic, struct cb *cb,
1297			     struct sk_buff *skb)
1298{
1299	const struct firmware *fw = (void *)skb;
1300	u8 timer, bundle, min_size;
1301
1302	/* It's not a real skb; we just abused the fact that e100_exec_cb
1303	   will pass it through to here... */
1304	cb->skb = NULL;
1305
1306	/* firmware is stored as little endian already */
1307	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);
1308
1309	/* Read timer, bundle and min_size from end of firmware blob */
1310	timer = fw->data[UCODE_SIZE * 4];
1311	bundle = fw->data[UCODE_SIZE * 4 + 1];
1312	min_size = fw->data[UCODE_SIZE * 4 + 2];
1313
1314	/* Insert user-tunable settings in cb->u.ucode */
1315	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
1316	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
1317	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
1318	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
1319	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
1320	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
1321
1322	cb->command = cpu_to_le16(cb_ucode | cb_el);
1323}
1324
1325static inline int e100_load_ucode_wait(struct nic *nic)
1326{
1327	const struct firmware *fw;
1328	int err = 0, counter = 50;
1329	struct cb *cb = nic->cb_to_clean;
1330
1331	fw = e100_request_firmware(nic);
1332	/* If it's NULL, then no ucode is required */
1333	if (!fw || IS_ERR(fw))
1334		return PTR_ERR(fw);
1335
1336	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1337		netif_err(nic, probe, nic->netdev,
1338			  "ucode cmd failed with error %d\n", err);
1339
1340	/* must restart cuc */
1341	nic->cuc_cmd = cuc_start;
1342
1343	/* wait for completion */
1344	e100_write_flush(nic);
1345	udelay(10);
1346
1347	/* wait for possibly (ouch) 500ms */
1348	while (!(cb->status & cpu_to_le16(cb_complete))) {
1349		msleep(10);
1350		if (!--counter) break;
1351	}
1352
1353	/* ack any interrupts, something could have been set */
1354	iowrite8(~0, &nic->csr->scb.stat_ack);
1355
1356	/* if the command failed, or is not OK, notify and return */
1357	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1358		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1359		err = -EPERM;
1360	}
1361
1362	return err;
1363}
1364
1365static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1366	struct sk_buff *skb)
1367{
1368	cb->command = cpu_to_le16(cb_iaaddr);
1369	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1370}
1371
1372static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1373{
1374	cb->command = cpu_to_le16(cb_dump);
1375	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1376		offsetof(struct mem, dump_buf));
1377}
1378
1379static int e100_phy_check_without_mii(struct nic *nic)
1380{
1381	u8 phy_type;
1382	int without_mii;
1383
1384	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
1385
1386	switch (phy_type) {
1387	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
1388	case I82503: /* Non-MII PHY; UNTESTED! */
1389	case S80C24: /* Non-MII PHY; tested and working */
1390		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
1391		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
1392		 * doesn't have a programming interface of any sort.  The
1393		 * media is sensed automatically based on how the link partner
1394		 * is configured.  This is, in essence, manual configuration.
1395		 */
1396		netif_info(nic, probe, nic->netdev,
1397			   "found MII-less i82503 or 80c24 or other PHY\n");
1398
1399		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1400		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
1401
1402		/* these might be needed for certain MII-less cards...
1403		 * nic->flags |= ich;
1404		 * nic->flags |= ich_10h_workaround; */
1405
1406		without_mii = 1;
1407		break;
1408	default:
1409		without_mii = 0;
1410		break;
1411	}
1412	return without_mii;
1413}
1414
1415#define NCONFIG_AUTO_SWITCH	0x0080
1416#define MII_NSC_CONG		MII_RESV1
1417#define NSC_CONG_ENABLE		0x0100
1418#define NSC_CONG_TXREADY	0x0400
1419#define ADVERTISE_FC_SUPPORTED	0x0400
1420static int e100_phy_init(struct nic *nic)
1421{
1422	struct net_device *netdev = nic->netdev;
1423	u32 addr;
1424	u16 bmcr, stat, id_lo, id_hi, cong;
1425
1426	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
1427	for (addr = 0; addr < 32; addr++) {
1428		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
1429		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1430		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1431		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1432		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
1433			break;
1434	}
1435	if (addr == 32) {
1436		/* uhoh, no PHY detected: check whether we seem to be some
1437		 * weird, rare variant which is *known* to not have any MII.
1438		 * But do this AFTER MII checking only, since this does
1439		 * lookup of EEPROM values which may easily be unreliable. */
1440		if (e100_phy_check_without_mii(nic))
1441			return 0; /* simply return and hope for the best */
1442		else {
1443			/* for unknown cases log a fatal error */
1444			netif_err(nic, hw, nic->netdev,
1445				  "Failed to locate any known PHY, aborting\n");
1446			return -EAGAIN;
1447		}
1448	} else
1449		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1450			     "phy_addr = %d\n", nic->mii.phy_id);
1451
1452	/* Get phy ID */
1453	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1454	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1455	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1456	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1457		     "phy ID = 0x%08X\n", nic->phy);
1458
1459	/* Select the phy and isolate the rest */
1460	for (addr = 0; addr < 32; addr++) {
1461		if (addr != nic->mii.phy_id) {
1462			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
1463		} else if (nic->phy != phy_82552_v) {
1464			bmcr = mdio_read(netdev, addr, MII_BMCR);
1465			mdio_write(netdev, addr, MII_BMCR,
1466				bmcr & ~BMCR_ISOLATE);
1467		}
1468	}
1469	/*
1470	 * Workaround for 82552:
1471	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
1472	 * other phy_id's) using bmcr value from addr discovery loop above.
1473	 */
1474	if (nic->phy == phy_82552_v)
1475		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
1476			bmcr & ~BMCR_ISOLATE);
1477
1478	/* Handle National tx phys */
1479#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
1480	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
1481		/* Disable congestion control */
1482		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
1483		cong |= NSC_CONG_TXREADY;
1484		cong &= ~NSC_CONG_ENABLE;
1485		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1486	}
1487
1488	if (nic->phy == phy_82552_v) {
1489		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1490
1491		/* assign special tweaked mdio_ctrl() function */
1492		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;
1493
1494		/* Workaround Si not advertising flow-control during autoneg */
1495		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1496		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1497
1498		/* Reset for the above changes to take effect */
1499		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
1500		bmcr |= BMCR_RESET;
1501		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
1502	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
1503	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
1504		!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
1505		/* enable/disable MDI/MDI-X auto-switching. */
1506		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
1507				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
1508	}
1509
1510	return 0;
1511}
1512
1513static int e100_hw_init(struct nic *nic)
1514{
1515	int err = 0;
1516
1517	e100_hw_reset(nic);
1518
1519	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1520	if (!in_interrupt() && (err = e100_self_test(nic)))
1521		return err;
1522
1523	if ((err = e100_phy_init(nic)))
1524		return err;
1525	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
1526		return err;
1527	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
1528		return err;
1529	if ((err = e100_load_ucode_wait(nic)))
1530		return err;
1531	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
1532		return err;
1533	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
1534		return err;
1535	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
1536		nic->dma_addr + offsetof(struct mem, stats))))
1537		return err;
1538	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1539		return err;
1540
1541	e100_disable_irq(nic);
1542
1543	return 0;
1544}
1545
1546static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1547{
1548	struct net_device *netdev = nic->netdev;
1549	struct netdev_hw_addr *ha;
1550	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1551
1552	cb->command = cpu_to_le16(cb_multi);
1553	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1554	i = 0;
1555	netdev_for_each_mc_addr(ha, netdev) {
1556		if (i == count)
1557			break;
1558		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1559			ETH_ALEN);
1560	}
1561}
1562
1563static void e100_set_multicast_list(struct net_device *netdev)
1564{
1565	struct nic *nic = netdev_priv(netdev);
1566
1567	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1568		     "mc_count=%d, flags=0x%04X\n",
1569		     netdev_mc_count(netdev), netdev->flags);
1570
1571	if (netdev->flags & IFF_PROMISC)
1572		nic->flags |= promiscuous;
1573	else
1574		nic->flags &= ~promiscuous;
1575
1576	if (netdev->flags & IFF_ALLMULTI ||
1577		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1578		nic->flags |= multicast_all;
1579	else
1580		nic->flags &= ~multicast_all;
1581
1582	e100_exec_cb(nic, NULL, e100_configure);
1583	e100_exec_cb(nic, NULL, e100_multi);
1584}
1585
1586static void e100_update_stats(struct nic *nic)
1587{
1588	struct net_device *dev = nic->netdev;
1589	struct net_device_stats *ns = &dev->stats;
1590	struct stats *s = &nic->mem->stats;
1591	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
1592		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
1593		&s->complete;
1594
1595	/* Device's stats reporting may take several microseconds to
1596	 * complete, so we're always waiting for results of the
1597	 * previous command. */
1598
1599	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
1600		*complete = 0;
1601		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
1602		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
1603		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
1604		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
1605		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
1606		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
1607		ns->collisions += nic->tx_collisions;
1608		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
1609			le32_to_cpu(s->tx_lost_crs);
1610		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
1611			nic->rx_over_length_errors;
1612		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
1613		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
1614		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
1615		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
1616		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
1617		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
1618			le32_to_cpu(s->rx_alignment_errors) +
1619			le32_to_cpu(s->rx_short_frame_errors) +
1620			le32_to_cpu(s->rx_cdt_errors);
1621		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
1622		nic->tx_single_collisions +=
1623			le32_to_cpu(s->tx_single_collisions);
1624		nic->tx_multiple_collisions +=
1625			le32_to_cpu(s->tx_multiple_collisions);
1626		if (nic->mac >= mac_82558_D101_A4) {
1627			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
1628			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
1629			nic->rx_fc_unsupported +=
1630				le32_to_cpu(s->fc_rcv_unsupported);
1631			if (nic->mac >= mac_82559_D101M) {
1632				nic->tx_tco_frames +=
1633					le16_to_cpu(s->xmt_tco_frames);
1634				nic->rx_tco_frames +=
1635					le16_to_cpu(s->rcv_tco_frames);
1636			}
1637		}
1638	}
1639
1640
1641	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1642		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1643			     "exec cuc_dump_reset failed\n");
1644}
1645
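/*
 * e100_adjust_adaptive_ifs - tune the transmit inter-frame spacing
 * from the collision rate seen since the last watchdog tick.
 *
 * On a busy half-duplex link (more than min_frames since the last
 * tick), a collision rate above roughly 3% (tx_frames/32 <
 * tx_collisions) widens the IFS by 5 up to a cap of 60; on a quiet
 * link the IFS is narrowed by 5 back toward 0.  Any change is pushed
 * to the hardware with a new configure command.
 */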
1646static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1647{
1648	/* Adjust inter-frame-spacing (IFS) between two transmits if
1649	 * we're getting collisions on a half-duplex connection. */
1650
1651	if (duplex == DUPLEX_HALF) {
1652		u32 prev = nic->adaptive_ifs;
1653		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1654
1655		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1656		   (nic->tx_frames > min_frames)) {
1657			if (nic->adaptive_ifs < 60)
1658				nic->adaptive_ifs += 5;
1659		} else if (nic->tx_frames < min_frames) {
1660			if (nic->adaptive_ifs >= 5)
1661				nic->adaptive_ifs -= 5;
1662		}
1663		if (nic->adaptive_ifs != prev)
1664			e100_exec_cb(nic, NULL, e100_configure);
1665	}
1666}
1667
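/*
 * e100_watchdog - periodic housekeeping, re-armed every
 * E100_WATCHDOG_PERIOD.
 *
 * Lets the mii library maintain link state, fires a software
 * interrupt so NAPI can recover from a (rare) Rx allocation failure,
 * harvests statistics, retunes the adaptive IFS, and applies the
 * 82557 multicast and ICH 10Mbps/half-duplex workarounds.
 */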
1668static void e100_watchdog(unsigned long data)
1669{
1670	struct nic *nic = (struct nic *)data;
1671	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1672	u32 speed;
1673
1674	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1675		     "right now = %ld\n", jiffies);
1676
1677	/* mii library handles link maintenance tasks */
1678
1679	mii_ethtool_gset(&nic->mii, &cmd);
1680	speed = ethtool_cmd_speed(&cmd);
1681
1682	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1683		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1684			    speed == SPEED_100 ? 100 : 10,
1685			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1686	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1687		netdev_info(nic->netdev, "NIC Link is Down\n");
1688	}
1689
1690	mii_check_link(&nic->mii);
1691
1692	/* Generate a software interrupt to recover from a (rare) Rx
1693	 * allocation failure.
1694	 * Unfortunately we must hold a spinlock so we don't accidentally
1695	 * re-enable interrupts: the hardware shares one register between
1696	 * the interrupt mask bit and the SW interrupt generation bit */
1697	spin_lock_irq(&nic->cmd_lock);
1698	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
1699	e100_write_flush(nic);
1700	spin_unlock_irq(&nic->cmd_lock);
1701
1702	e100_update_stats(nic);
1703	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1704
1705	if (nic->mac <= mac_82557_D100_C)
1706		/* Issue a multicast command to work around an 82557 lock-up */
1707		e100_set_multicast_list(nic->netdev);
1708
1709	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
1710		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
1711		nic->flags |= ich_10h_workaround;
1712	else
1713		nic->flags &= ~ich_10h_workaround;
1714
1715	mod_timer(&nic->watchdog,
1716		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1717}
1718
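/*
 * e100_xmit_prepare - fill a TCB for a single-buffer transmit.
 *
 * Each skb maps to exactly one TBD; this path does no scatter-gather.
 * The `(cbs_avail & ~15) == cbs_avail' test below is true whenever
 * cbs_avail is a multiple of 16, so an interrupt is requested on
 * every 16th CB regardless of the configured Tx interrupt delay.
 */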
1719static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1720	struct sk_buff *skb)
1721{
1722	cb->command = nic->tx_command;
1723	/* interrupt every 16 packets regardless of delay */
1724	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1725		cb->command |= cpu_to_le16(cb_i);
1726	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1727	cb->u.tcb.tcb_byte_count = 0;
1728	cb->u.tcb.threshold = nic->tx_threshold;
1729	cb->u.tcb.tbd_count = 1;
1730	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
1731		skb->data, skb->len, PCI_DMA_TODEVICE));
1732	/* FIXME: no check here for a pci_map_single() mapping failure */
1733	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1734}
1735
1736static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1737				   struct net_device *netdev)
1738{
1739	struct nic *nic = netdev_priv(netdev);
1740	int err;
1741
1742	if (nic->flags & ich_10h_workaround) {
1743		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1744		   Issue a NOP command followed by a 1us delay before
1745		   issuing the Tx command. */
1746		if (e100_exec_cmd(nic, cuc_nop, 0))
1747			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1748				     "exec cuc_nop failed\n");
1749		udelay(1);
1750	}
1751
1752	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1753
1754	switch (err) {
1755	case -ENOSPC:
1756		/* We queued the skb, but now we're out of space. */
1757		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1758			     "No space for CB\n");
1759		netif_stop_queue(netdev);
1760		break;
1761	case -ENOMEM:
1762		/* This is a hard error - log it. */
1763		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1764			     "Out of Tx resources, returning skb\n");
1765		netif_stop_queue(netdev);
1766		return NETDEV_TX_BUSY;
1767	}
1768
1769	return NETDEV_TX_OK;
1770}
1771
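/*
 * e100_tx_clean - reclaim completed transmit CBs.
 *
 * Walks the CBL from cb_to_clean for as long as hardware has set
 * cb_complete, unmapping and freeing each skb and crediting
 * tx_packets/tx_bytes.  Runs under cb_lock so it is safe against
 * e100_exec_cb() filling the other end of the ring, and re-wakes the
 * queue if xmit_frame had stopped it for lack of CBs.
 */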
1772static int e100_tx_clean(struct nic *nic)
1773{
1774	struct net_device *dev = nic->netdev;
1775	struct cb *cb;
1776	int tx_cleaned = 0;
1777
1778	spin_lock(&nic->cb_lock);
1779
1780	/* Clean CBs marked complete */
1781	for (cb = nic->cb_to_clean;
1782	    cb->status & cpu_to_le16(cb_complete);
1783	    cb = nic->cb_to_clean = cb->next) {
1784		rmb(); /* read skb after status */
1785		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1786			     "cb[%d]->status = 0x%04X\n",
1787			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1788			     cb->status);
1789
1790		if (likely(cb->skb != NULL)) {
1791			dev->stats.tx_packets++;
1792			dev->stats.tx_bytes += cb->skb->len;
1793
1794			pci_unmap_single(nic->pdev,
1795				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1796				le16_to_cpu(cb->u.tcb.tbd.size),
1797				PCI_DMA_TODEVICE);
1798			dev_kfree_skb_any(cb->skb);
1799			cb->skb = NULL;
1800			tx_cleaned = 1;
1801		}
1802		cb->status = 0;
1803		nic->cbs_avail++;
1804	}
1805
1806	spin_unlock(&nic->cb_lock);
1807
1808	/* Recover from running out of Tx resources in xmit_frame */
1809	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1810		netif_wake_queue(nic->netdev);
1811
1812	return tx_cleaned;
1813}
1814
1815static void e100_clean_cbs(struct nic *nic)
1816{
1817	if (nic->cbs) {
1818		while (nic->cbs_avail != nic->params.cbs.count) {
1819			struct cb *cb = nic->cb_to_clean;
1820			if (cb->skb) {
1821				pci_unmap_single(nic->pdev,
1822					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1823					le16_to_cpu(cb->u.tcb.tbd.size),
1824					PCI_DMA_TODEVICE);
1825				dev_kfree_skb(cb->skb);
1826			}
1827			nic->cb_to_clean = nic->cb_to_clean->next;
1828			nic->cbs_avail++;
1829		}
1830		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1831		nic->cbs = NULL;
1832		nic->cbs_avail = 0;
1833	}
1834	nic->cuc_cmd = cuc_start;
1835	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1836		nic->cbs;
1837}
1838
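/*
 * e100_alloc_cbs - allocate and link the Command Block List.
 *
 * All CBs come from a single DMA pool allocation and are linked into
 * a software ring (next/prev) that is mirrored by the hardware
 * `link' pointers, each holding the bus address of the following CB,
 * roughly:
 *
 *	cbs[0] -> cbs[1] -> ... -> cbs[count-1] --+
 *	  ^---------------------------------------+
 */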
1839static int e100_alloc_cbs(struct nic *nic)
1840{
1841	struct cb *cb;
1842	unsigned int i, count = nic->params.cbs.count;
1843
1844	nic->cuc_cmd = cuc_start;
1845	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1846	nic->cbs_avail = 0;
1847
1848	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1849				  &nic->cbs_dma_addr);
1850	if (!nic->cbs)
1851		return -ENOMEM;
1852	memset(nic->cbs, 0, count * sizeof(struct cb));
1853
1854	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1855		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1856		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1857
1858		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1859		cb->link = cpu_to_le32(nic->cbs_dma_addr +
1860			((i+1) % count) * sizeof(struct cb));
1861	}
1862
1863	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1864	nic->cbs_avail = count;
1865
1866	return 0;
1867}
1868
1869static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1870{
1871	if (!nic->rxs) return;
1872	if (RU_SUSPENDED != nic->ru_running) return;
1873
1874	/* handle init time starts */
1875	if (!rx) rx = nic->rxs;
1876
1877	/* (Re)start RU if suspended or idle and RFA is non-NULL */
1878	if (rx->skb) {
1879		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1880		nic->ru_running = RU_RUNNING;
1881	}
1882}
1883
1884#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
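/*
 * e100_rx_alloc_skb - attach a fresh skb to an rx slot.
 *
 * The RFD lives at the head of the skb's own data area (copied in
 * from blank_rfd), so one bidirectional mapping covers both the
 * descriptor the hardware writes back and the packet data that
 * follows it.  On success the previous RFD's link pointer is patched
 * to chain this buffer onto the end of the RFA.
 */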
1885static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1886{
1887	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1888		return -ENOMEM;
1889
1890	/* Init, and map the RFD. */
1891	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1892	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1893		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1894
1895	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1896		dev_kfree_skb_any(rx->skb);
1897		rx->skb = NULL;
1898		rx->dma_addr = 0;
1899		return -ENOMEM;
1900	}
1901
1902	/* Link this RFD onto the end of the RFA by pointing the previous
1903	 * RFD at it.  Touching the previous RFD is safe because the el bit
1904	 * set on the before-last buffer keeps the hardware from reaching it */
1905	if (rx->prev->skb) {
1906		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1907		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1908		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1909			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1910	}
1911
1912	return 0;
1913}
1914
1915static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1916	unsigned int *work_done, unsigned int work_to_do)
1917{
1918	struct net_device *dev = nic->netdev;
1919	struct sk_buff *skb = rx->skb;
1920	struct rfd *rfd = (struct rfd *)skb->data;
1921	u16 rfd_status, actual_size;
1922
1923	if (unlikely(work_done && *work_done >= work_to_do))
1924		return -EAGAIN;
1925
1926	/* Need to sync before taking a peek at cb_complete bit */
1927	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1928		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1929	rfd_status = le16_to_cpu(rfd->status);
1930
1931	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1932		     "status=0x%04X\n", rfd_status);
1933	rmb(); /* read size after status bit */
1934
1935	/* If data isn't ready, nothing to indicate */
1936	if (unlikely(!(rfd_status & cb_complete))) {
1937		/* If the next buffer has the el bit, but we think the receiver
1938		 * is still running, check to see if it really stopped while
1939		 * we had interrupts off.
1940		 * This allows for a fast restart without re-enabling
1941		 * interrupts */
1942		if ((le16_to_cpu(rfd->command) & cb_el) &&
1943		    (RU_RUNNING == nic->ru_running))
1944
1945			if (ioread8(&nic->csr->scb.status) & rus_no_res)
1946				nic->ru_running = RU_SUSPENDED;
1947		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1948					       sizeof(struct rfd),
1949					       PCI_DMA_FROMDEVICE);
1950		return -ENODATA;
1951	}
1952
1953	/* Get actual data size */
1954	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
1955	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
1956		actual_size = RFD_BUF_LEN - sizeof(struct rfd);
1957
1958	/* Get data */
1959	pci_unmap_single(nic->pdev, rx->dma_addr,
1960		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1961
1962	/* If this buffer has the el bit, but we think the receiver
1963	 * is still running, check to see if it really stopped while
1964	 * we had interrupts off.
1965	 * This allows for a fast restart without re-enabling interrupts.
1966	 * This can happen when the RU sees the size change but also sees
1967	 * the el bit set. */
1968	if ((le16_to_cpu(rfd->command) & cb_el) &&
1969	    (RU_RUNNING == nic->ru_running)) {
1970
1971		if (ioread8(&nic->csr->scb.status) & rus_no_res)
1972			nic->ru_running = RU_SUSPENDED;
1973	}
1974
1975	/* Pull off the RFD and put the actual data (minus eth hdr) */
1976	skb_reserve(skb, sizeof(struct rfd));
1977	skb_put(skb, actual_size);
1978	skb->protocol = eth_type_trans(skb, nic->netdev);
1979
1980	if (unlikely(!(rfd_status & cb_ok))) {
1981		/* Don't indicate if hardware indicates errors */
1982		dev_kfree_skb_any(skb);
1983	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
1984		/* Don't indicate oversized frames */
1985		nic->rx_over_length_errors++;
1986		dev_kfree_skb_any(skb);
1987	} else {
1988		dev->stats.rx_packets++;
1989		dev->stats.rx_bytes += actual_size;
1990		netif_receive_skb(skb);
1991		if (work_done)
1992			(*work_done)++;
1993	}
1994
1995	rx->skb = NULL;
1996
1997	return 0;
1998}
1999
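/*
 * e100_rx_clean - NAPI Rx work: indicate, refill, and re-arm.
 *
 * After completed RFDs are indicated and empty slots refilled, the
 * logical end of the RFA is marked by setting the el bit (with size
 * 0) on the buffer *before* the last one, roughly:
 *
 *	... -> RFD -> RFD(el, size 0) -> RFD(last)
 *	                ^ hardware RNRs here
 *
 * so hardware never chases a link pointer the driver may still be
 * updating.  The old stop marker is then cleared with ordered syncs,
 * and the RU is restarted if it was found suspended after a full
 * cleanup.
 */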
2000static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
2001	unsigned int work_to_do)
2002{
2003	struct rx *rx;
2004	int restart_required = 0, err = 0;
2005	struct rx *old_before_last_rx, *new_before_last_rx;
2006	struct rfd *old_before_last_rfd, *new_before_last_rfd;
2007
2008	/* Indicate newly arrived packets */
2009	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
2010		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2011		/* Hit quota or no more to clean */
2012		if (-EAGAIN == err || -ENODATA == err)
2013			break;
2014	}
2015
2016
2017	/* On EAGAIN we hit the quota and have more work to do, so restart
2018	 * once cleanup is complete.
2019	 * Otherwise, if we are already in the RNR state, take note: this
2020	 * ensures the state machine never starts the receiver with a
2021	 * partially cleaned list, avoiding a race between hardware
2022	 * and rx_to_clean when in NAPI mode */
2023	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2024		restart_required = 1;
2025
2026	old_before_last_rx = nic->rx_to_use->prev->prev;
2027	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
2028
2029	/* Alloc new skbs to refill list */
2030	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2031		if (unlikely(e100_rx_alloc_skb(nic, rx)))
2032			break; /* Better luck next time (see watchdog) */
2033	}
2034
2035	new_before_last_rx = nic->rx_to_use->prev->prev;
2036	if (new_before_last_rx != old_before_last_rx) {
2037		/* Set the el-bit on the buffer that is before the last buffer.
2038		 * This lets us update the next pointer on the last buffer
2039		 * without worrying about hardware touching it.
2040		 * We set the size to 0 to prevent hardware from touching this
2041		 * buffer.
2042	 * When the hardware hits the before-last buffer with el-bit
2043	 * and a size of 0, it raises an RNR interrupt and the RU goes
2044	 * into the No Resources state.  It will neither complete nor
2045	 * write to this buffer. */
2046		new_before_last_rfd =
2047			(struct rfd *)new_before_last_rx->skb->data;
2048		new_before_last_rfd->size = 0;
2049		new_before_last_rfd->command |= cpu_to_le16(cb_el);
2050		pci_dma_sync_single_for_device(nic->pdev,
2051			new_before_last_rx->dma_addr, sizeof(struct rfd),
2052			PCI_DMA_BIDIRECTIONAL);
2053
2054		/* Now that we have a new stopping point, we can clear the old
2055		 * stopping point.  We must sync twice to get the proper
2056		 * ordering on the hardware side of things. */
2057		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2058		pci_dma_sync_single_for_device(nic->pdev,
2059			old_before_last_rx->dma_addr, sizeof(struct rfd),
2060			PCI_DMA_BIDIRECTIONAL);
2061		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
2062		pci_dma_sync_single_for_device(nic->pdev,
2063			old_before_last_rx->dma_addr, sizeof(struct rfd),
2064			PCI_DMA_BIDIRECTIONAL);
2065	}
2066
2067	if (restart_required) {
2068		/* ack the RNR before restarting the receiver */
2069		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
2070		e100_start_receiver(nic, nic->rx_to_clean);
2071		if (work_done)
2072			(*work_done)++;
2073	}
2074}
2075
2076static void e100_rx_clean_list(struct nic *nic)
2077{
2078	struct rx *rx;
2079	unsigned int i, count = nic->params.rfds.count;
2080
2081	nic->ru_running = RU_UNINITIALIZED;
2082
2083	if (nic->rxs) {
2084		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2085			if (rx->skb) {
2086				pci_unmap_single(nic->pdev, rx->dma_addr,
2087					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2088				dev_kfree_skb(rx->skb);
2089			}
2090		}
2091		kfree(nic->rxs);
2092		nic->rxs = NULL;
2093	}
2094
2095	nic->rx_to_use = nic->rx_to_clean = NULL;
2096}
2097
2098static int e100_rx_alloc_list(struct nic *nic)
2099{
2100	struct rx *rx;
2101	unsigned int i, count = nic->params.rfds.count;
2102	struct rfd *before_last;
2103
2104	nic->rx_to_use = nic->rx_to_clean = NULL;
2105	nic->ru_running = RU_UNINITIALIZED;
2106
2107	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
2108		return -ENOMEM;
2109
2110	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2111		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2112		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
2113		if (e100_rx_alloc_skb(nic, rx)) {
2114			e100_rx_clean_list(nic);
2115			return -ENOMEM;
2116		}
2117	}
2118	/* Set the el-bit on the buffer that is before the last buffer.
2119	 * This lets us update the next pointer on the last buffer without
2120	 * worrying about hardware touching it.
2121	 * We set the size to 0 to prevent hardware from touching this buffer.
2122	 * When the hardware hits the before-last buffer with el-bit and a
2123	 * size of 0, it raises an RNR interrupt and the RU goes into the No
2124	 * Resources state.  It will neither complete nor write to this buffer. */
2125	rx = nic->rxs->prev->prev;
2126	before_last = (struct rfd *)rx->skb->data;
2127	before_last->command |= cpu_to_le16(cb_el);
2128	before_last->size = 0;
2129	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2130		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2131
2132	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2133	nic->ru_running = RU_SUSPENDED;
2134
2135	return 0;
2136}
2137
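/*
 * e100_intr - top-half interrupt handler.
 *
 * Reads and acks the SCB stat/ack byte; IRQ_NONE is returned if the
 * (shared) interrupt isn't ours or the hardware has been ejected.
 * An RNR is only noted here so the receiver can be restarted after
 * cleanup; everything else is deferred to NAPI with device
 * interrupts disabled.
 */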
2138static irqreturn_t e100_intr(int irq, void *dev_id)
2139{
2140	struct net_device *netdev = dev_id;
2141	struct nic *nic = netdev_priv(netdev);
2142	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2143
2144	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2145		     "stat_ack = 0x%02X\n", stat_ack);
2146
2147	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
2148	   stat_ack == stat_ack_not_present)	/* Hardware is ejected */
2149		return IRQ_NONE;
2150
2151	/* Ack interrupt(s) */
2152	iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2153
2154	/* We hit Receive No Resource (RNR); restart RU after cleaning */
2155	if (stat_ack & stat_ack_rnr)
2156		nic->ru_running = RU_SUSPENDED;
2157
2158	if (likely(napi_schedule_prep(&nic->napi))) {
2159		e100_disable_irq(nic);
2160		__napi_schedule(&nic->napi);
2161	}
2162
2163	return IRQ_HANDLED;
2164}
2165
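/*
 * e100_poll - NAPI poll: clean Rx up to budget, then Tx.
 *
 * Per the NAPI contract, if less than the full budget was consumed
 * the poll is completed and device interrupts are re-enabled;
 * otherwise the driver stays in polling mode and will be rescheduled.
 */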
2166static int e100_poll(struct napi_struct *napi, int budget)
2167{
2168	struct nic *nic = container_of(napi, struct nic, napi);
2169	unsigned int work_done = 0;
2170
2171	e100_rx_clean(nic, &work_done, budget);
2172	e100_tx_clean(nic);
2173
2174	/* If budget not fully consumed, exit the polling mode */
2175	if (work_done < budget) {
2176		napi_complete(napi);
2177		e100_enable_irq(nic);
2178	}
2179
2180	return work_done;
2181}
2182
2183#ifdef CONFIG_NET_POLL_CONTROLLER
2184static void e100_netpoll(struct net_device *netdev)
2185{
2186	struct nic *nic = netdev_priv(netdev);
2187
2188	e100_disable_irq(nic);
2189	e100_intr(nic->pdev->irq, netdev);
2190	e100_tx_clean(nic);
2191	e100_enable_irq(nic);
2192}
2193#endif
2194
2195static int e100_set_mac_address(struct net_device *netdev, void *p)
2196{
2197	struct nic *nic = netdev_priv(netdev);
2198	struct sockaddr *addr = p;
2199
2200	if (!is_valid_ether_addr(addr->sa_data))
2201		return -EADDRNOTAVAIL;
2202
2203	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2204	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2205
2206	return 0;
2207}
2208
2209static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2210{
2211	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2212		return -EINVAL;
2213	netdev->mtu = new_mtu;
2214	return 0;
2215}
2216
2217static int e100_asf(struct nic *nic)
2218{
2219	/* ASF can be enabled from EEPROM */
2220	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2221	   (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2222	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2223	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2224}
2225
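/*
 * e100_up - bring the interface up.
 *
 * Ordering matters here: rings and hardware init come first, the IRQ
 * is requested only once the receiver is primed, and interrupts are
 * enabled only after napi_enable() so a stray interrupt cannot try
 * to schedule a still-disabled NAPI context.  The error path unwinds
 * in reverse order.
 */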
2226static int e100_up(struct nic *nic)
2227{
2228	int err;
2229
2230	if ((err = e100_rx_alloc_list(nic)))
2231		return err;
2232	if ((err = e100_alloc_cbs(nic)))
2233		goto err_rx_clean_list;
2234	if ((err = e100_hw_init(nic)))
2235		goto err_clean_cbs;
2236	e100_set_multicast_list(nic->netdev);
2237	e100_start_receiver(nic, NULL);
2238	mod_timer(&nic->watchdog, jiffies);
2239	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2240		nic->netdev->name, nic->netdev)))
2241		goto err_no_irq;
2242	netif_wake_queue(nic->netdev);
2243	napi_enable(&nic->napi);
2244	/* enable ints _after_ enabling poll, preventing a race between
2245	 * disable ints+schedule */
2246	e100_enable_irq(nic);
2247	return 0;
2248
2249err_no_irq:
2250	del_timer_sync(&nic->watchdog);
2251err_clean_cbs:
2252	e100_clean_cbs(nic);
2253err_rx_clean_list:
2254	e100_rx_clean_list(nic);
2255	return err;
2256}
2257
2258static void e100_down(struct nic *nic)
2259{
2260	/* wait here for poll to complete */
2261	napi_disable(&nic->napi);
2262	netif_stop_queue(nic->netdev);
2263	e100_hw_reset(nic);
2264	free_irq(nic->pdev->irq, nic->netdev);
2265	del_timer_sync(&nic->watchdog);
2266	netif_carrier_off(nic->netdev);
2267	e100_clean_cbs(nic);
2268	e100_rx_clean_list(nic);
2269}
2270
2271static void e100_tx_timeout(struct net_device *netdev)
2272{
2273	struct nic *nic = netdev_priv(netdev);
2274
2275	/* Reset outside of interrupt context, to avoid request_irq
2276	 * in interrupt context */
2277	schedule_work(&nic->tx_timeout_task);
2278}
2279
2280static void e100_tx_timeout_task(struct work_struct *work)
2281{
2282	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2283	struct net_device *netdev = nic->netdev;
2284
2285	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2286		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2287
2288	rtnl_lock();
2289	if (netif_running(netdev)) {
2290		e100_down(nic);
2291		e100_up(nic);
2292	}
2293	rtnl_unlock();
2294}
2295
2296static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2297{
2298	int err;
2299	struct sk_buff *skb;
2300
2301	/* Use driver resources to perform internal MAC or PHY
2302	 * loopback test.  A single packet is prepared and transmitted
2303	 * in loopback mode, and the test passes if the received
2304	 * packet compares byte-for-byte to the transmitted packet. */
2305
2306	if ((err = e100_rx_alloc_list(nic)))
2307		return err;
2308	if ((err = e100_alloc_cbs(nic)))
2309		goto err_clean_rx;
2310
2311	/* ICH PHY loopback is broken so do MAC loopback instead */
2312	if (nic->flags & ich && loopback_mode == lb_phy)
2313		loopback_mode = lb_mac;
2314
2315	nic->loopback = loopback_mode;
2316	if ((err = e100_hw_init(nic)))
2317		goto err_loopback_none;
2318
2319	if (loopback_mode == lb_phy)
2320		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2321			BMCR_LOOPBACK);
2322
2323	e100_start_receiver(nic, NULL);
2324
2325	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2326		err = -ENOMEM;
2327		goto err_loopback_none;
2328	}
2329	skb_put(skb, ETH_DATA_LEN);
2330	memset(skb->data, 0xFF, ETH_DATA_LEN);
2331	e100_xmit_frame(skb, nic->netdev);
2332
2333	msleep(10);
2334
2335	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2336			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2337
2338	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2339	   skb->data, ETH_DATA_LEN))
2340		err = -EAGAIN;
2341
2342err_loopback_none:
2343	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2344	nic->loopback = lb_none;
2345	e100_clean_cbs(nic);
2346	e100_hw_reset(nic);
2347err_clean_rx:
2348	e100_rx_clean_list(nic);
2349	return err;
2350}
2351
2352#define MII_LED_CONTROL	0x1B
2353#define E100_82552_LED_OVERRIDE 0x19
2354#define E100_82552_LED_ON       0x000F /* LED_TX and LED_RX both on */
2355#define E100_82552_LED_OFF      0x000A /* LED_TX and LED_RX both off */
2356
2357static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2358{
2359	struct nic *nic = netdev_priv(netdev);
2360	return mii_ethtool_gset(&nic->mii, cmd);
2361}
2362
2363static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2364{
2365	struct nic *nic = netdev_priv(netdev);
2366	int err;
2367
2368	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2369	err = mii_ethtool_sset(&nic->mii, cmd);
2370	e100_exec_cb(nic, NULL, e100_configure);
2371
2372	return err;
2373}
2374
2375static void e100_get_drvinfo(struct net_device *netdev,
2376	struct ethtool_drvinfo *info)
2377{
2378	struct nic *nic = netdev_priv(netdev);
2379	strcpy(info->driver, DRV_NAME);
2380	strcpy(info->version, DRV_VERSION);
2381	strcpy(info->fw_version, "N/A");
2382	strcpy(info->bus_info, pci_name(nic->pdev));
2383}
2384
2385#define E100_PHY_REGS 0x1C
2386static int e100_get_regs_len(struct net_device *netdev)
2387{
2388	struct nic *nic = netdev_priv(netdev);
2389	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2390}
2391
2392static void e100_get_regs(struct net_device *netdev,
2393	struct ethtool_regs *regs, void *p)
2394{
2395	struct nic *nic = netdev_priv(netdev);
2396	u32 *buff = p;
2397	int i;
2398
2399	regs->version = (1 << 24) | nic->pdev->revision;
2400	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2401		ioread8(&nic->csr->scb.cmd_lo) << 16 |
2402		ioread16(&nic->csr->scb.status);
2403	for (i = E100_PHY_REGS; i >= 0; i--)
2404		buff[1 + E100_PHY_REGS - i] =
2405			mdio_read(netdev, nic->mii.phy_id, i);
2406	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2407	e100_exec_cb(nic, NULL, e100_dump);
2408	msleep(10);
2409	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2410		sizeof(nic->mem->dump_buf));
2411}
2412
2413static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2414{
2415	struct nic *nic = netdev_priv(netdev);
2416	wol->supported = (nic->mac >= mac_82558_D101_A4) ?  WAKE_MAGIC : 0;
2417	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2418}
2419
2420static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2421{
2422	struct nic *nic = netdev_priv(netdev);
2423
2424	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2425	    !device_can_wakeup(&nic->pdev->dev))
2426		return -EOPNOTSUPP;
2427
2428	if (wol->wolopts)
2429		nic->flags |= wol_magic;
2430	else
2431		nic->flags &= ~wol_magic;
2432
2433	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2434
2435	e100_exec_cb(nic, NULL, e100_configure);
2436
2437	return 0;
2438}
2439
2440static u32 e100_get_msglevel(struct net_device *netdev)
2441{
2442	struct nic *nic = netdev_priv(netdev);
2443	return nic->msg_enable;
2444}
2445
2446static void e100_set_msglevel(struct net_device *netdev, u32 value)
2447{
2448	struct nic *nic = netdev_priv(netdev);
2449	nic->msg_enable = value;
2450}
2451
2452static int e100_nway_reset(struct net_device *netdev)
2453{
2454	struct nic *nic = netdev_priv(netdev);
2455	return mii_nway_restart(&nic->mii);
2456}
2457
2458static u32 e100_get_link(struct net_device *netdev)
2459{
2460	struct nic *nic = netdev_priv(netdev);
2461	return mii_link_ok(&nic->mii);
2462}
2463
2464static int e100_get_eeprom_len(struct net_device *netdev)
2465{
2466	struct nic *nic = netdev_priv(netdev);
2467	return nic->eeprom_wc << 1;
2468}
2469
2470#define E100_EEPROM_MAGIC	0x1234
2471static int e100_get_eeprom(struct net_device *netdev,
2472	struct ethtool_eeprom *eeprom, u8 *bytes)
2473{
2474	struct nic *nic = netdev_priv(netdev);
2475
2476	eeprom->magic = E100_EEPROM_MAGIC;
2477	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2478
2479	return 0;
2480}
2481
2482static int e100_set_eeprom(struct net_device *netdev,
2483	struct ethtool_eeprom *eeprom, u8 *bytes)
2484{
2485	struct nic *nic = netdev_priv(netdev);
2486
2487	if (eeprom->magic != E100_EEPROM_MAGIC)
2488		return -EINVAL;
2489
2490	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2491
2492	return e100_eeprom_save(nic, eeprom->offset >> 1,
2493		(eeprom->len >> 1) + 1);
2494}
2495
2496static void e100_get_ringparam(struct net_device *netdev,
2497	struct ethtool_ringparam *ring)
2498{
2499	struct nic *nic = netdev_priv(netdev);
2500	struct param_range *rfds = &nic->params.rfds;
2501	struct param_range *cbs = &nic->params.cbs;
2502
2503	ring->rx_max_pending = rfds->max;
2504	ring->tx_max_pending = cbs->max;
2505	ring->rx_mini_max_pending = 0;
2506	ring->rx_jumbo_max_pending = 0;
2507	ring->rx_pending = rfds->count;
2508	ring->tx_pending = cbs->count;
2509	ring->rx_mini_pending = 0;
2510	ring->rx_jumbo_pending = 0;
2511}
2512
2513static int e100_set_ringparam(struct net_device *netdev,
2514	struct ethtool_ringparam *ring)
2515{
2516	struct nic *nic = netdev_priv(netdev);
2517	struct param_range *rfds = &nic->params.rfds;
2518	struct param_range *cbs = &nic->params.cbs;
2519
2520	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2521		return -EINVAL;
2522
2523	if (netif_running(netdev))
2524		e100_down(nic);
2525	rfds->count = max(ring->rx_pending, rfds->min);
2526	rfds->count = min(rfds->count, rfds->max);
2527	cbs->count = max(ring->tx_pending, cbs->min);
2528	cbs->count = min(cbs->count, cbs->max);
2529	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2530		   rfds->count, cbs->count);
2531	if (netif_running(netdev))
2532		e100_up(nic);
2533
2534	return 0;
2535}
2536
2537static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2538	"Link test     (on/offline)",
2539	"Eeprom test   (on/offline)",
2540	"Self test        (offline)",
2541	"Mac loopback     (offline)",
2542	"Phy loopback     (offline)",
2543};
2544#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
2545
2546static void e100_diag_test(struct net_device *netdev,
2547	struct ethtool_test *test, u64 *data)
2548{
2549	struct ethtool_cmd cmd;
2550	struct nic *nic = netdev_priv(netdev);
2551	int i, err;
2552
2553	memset(data, 0, E100_TEST_LEN * sizeof(u64));
2554	data[0] = !mii_link_ok(&nic->mii);
2555	data[1] = e100_eeprom_load(nic);
2556	if (test->flags & ETH_TEST_FL_OFFLINE) {
2557
2558		/* save speed, duplex & autoneg settings */
2559		err = mii_ethtool_gset(&nic->mii, &cmd);
2560
2561		if (netif_running(netdev))
2562			e100_down(nic);
2563		data[2] = e100_self_test(nic);
2564		data[3] = e100_loopback_test(nic, lb_mac);
2565		data[4] = e100_loopback_test(nic, lb_phy);
2566
2567		/* restore speed, duplex & autoneg settings */
2568		err = mii_ethtool_sset(&nic->mii, &cmd);
2569
2570		if (netif_running(netdev))
2571			e100_up(nic);
2572	}
2573	for (i = 0; i < E100_TEST_LEN; i++)
2574		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2575
2576	msleep_interruptible(4 * 1000);
2577}
2578
2579static int e100_set_phys_id(struct net_device *netdev,
2580			    enum ethtool_phys_id_state state)
2581{
2582	struct nic *nic = netdev_priv(netdev);
2583	enum led_state {
2584		led_on     = 0x01,
2585		led_off    = 0x04,
2586		led_on_559 = 0x05,
2587		led_on_557 = 0x07,
2588	};
2589	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2590		MII_LED_CONTROL;
2591	u16 leds = 0;
2592
2593	switch (state) {
2594	case ETHTOOL_ID_ACTIVE:
2595		return 2;
2596
2597	case ETHTOOL_ID_ON:
2598		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2599		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2600		break;
2601
2602	case ETHTOOL_ID_OFF:
2603		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2604		break;
2605
2606	case ETHTOOL_ID_INACTIVE:
2607		break;
2608	}
2609
2610	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2611	return 0;
2612}
2613
2614static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2615	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2616	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2617	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
2618	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2619	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2620	"tx_heartbeat_errors", "tx_window_errors",
2621	/* device-specific stats */
2622	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2623	"tx_flow_control_pause", "rx_flow_control_pause",
2624	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2625};
2626#define E100_NET_STATS_LEN	21
2627#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
2628
2629static int e100_get_sset_count(struct net_device *netdev, int sset)
2630{
2631	switch (sset) {
2632	case ETH_SS_TEST:
2633		return E100_TEST_LEN;
2634	case ETH_SS_STATS:
2635		return E100_STATS_LEN;
2636	default:
2637		return -EOPNOTSUPP;
2638	}
2639}
2640
2641static void e100_get_ethtool_stats(struct net_device *netdev,
2642	struct ethtool_stats *stats, u64 *data)
2643{
2644	struct nic *nic = netdev_priv(netdev);
2645	int i;
2646
2647	for (i = 0; i < E100_NET_STATS_LEN; i++)
2648		data[i] = ((unsigned long *)&netdev->stats)[i];
2649
2650	data[i++] = nic->tx_deferred;
2651	data[i++] = nic->tx_single_collisions;
2652	data[i++] = nic->tx_multiple_collisions;
2653	data[i++] = nic->tx_fc_pause;
2654	data[i++] = nic->rx_fc_pause;
2655	data[i++] = nic->rx_fc_unsupported;
2656	data[i++] = nic->tx_tco_frames;
2657	data[i++] = nic->rx_tco_frames;
2658}
2659
2660static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2661{
2662	switch (stringset) {
2663	case ETH_SS_TEST:
2664		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2665		break;
2666	case ETH_SS_STATS:
2667		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2668		break;
2669	}
2670}
2671
2672static const struct ethtool_ops e100_ethtool_ops = {
2673	.get_settings		= e100_get_settings,
2674	.set_settings		= e100_set_settings,
2675	.get_drvinfo		= e100_get_drvinfo,
2676	.get_regs_len		= e100_get_regs_len,
2677	.get_regs		= e100_get_regs,
2678	.get_wol		= e100_get_wol,
2679	.set_wol		= e100_set_wol,
2680	.get_msglevel		= e100_get_msglevel,
2681	.set_msglevel		= e100_set_msglevel,
2682	.nway_reset		= e100_nway_reset,
2683	.get_link		= e100_get_link,
2684	.get_eeprom_len		= e100_get_eeprom_len,
2685	.get_eeprom		= e100_get_eeprom,
2686	.set_eeprom		= e100_set_eeprom,
2687	.get_ringparam		= e100_get_ringparam,
2688	.set_ringparam		= e100_set_ringparam,
2689	.self_test		= e100_diag_test,
2690	.get_strings		= e100_get_strings,
2691	.set_phys_id		= e100_set_phys_id,
2692	.get_ethtool_stats	= e100_get_ethtool_stats,
2693	.get_sset_count		= e100_get_sset_count,
2694};
2695
2696static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2697{
2698	struct nic *nic = netdev_priv(netdev);
2699
2700	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2701}
2702
2703static int e100_alloc(struct nic *nic)
2704{
2705	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2706		&nic->dma_addr);
2707	return nic->mem ? 0 : -ENOMEM;
2708}
2709
2710static void e100_free(struct nic *nic)
2711{
2712	if (nic->mem) {
2713		pci_free_consistent(nic->pdev, sizeof(struct mem),
2714			nic->mem, nic->dma_addr);
2715		nic->mem = NULL;
2716	}
2717}
2718
2719static int e100_open(struct net_device *netdev)
2720{
2721	struct nic *nic = netdev_priv(netdev);
2722	int err = 0;
2723
2724	netif_carrier_off(netdev);
2725	if ((err = e100_up(nic)))
2726		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2727	return err;
2728}
2729
2730static int e100_close(struct net_device *netdev)
2731{
2732	e100_down(netdev_priv(netdev));
2733	return 0;
2734}
2735
2736static const struct net_device_ops e100_netdev_ops = {
2737	.ndo_open		= e100_open,
2738	.ndo_stop		= e100_close,
2739	.ndo_start_xmit		= e100_xmit_frame,
2740	.ndo_validate_addr	= eth_validate_addr,
2741	.ndo_set_multicast_list	= e100_set_multicast_list,
2742	.ndo_set_mac_address	= e100_set_mac_address,
2743	.ndo_change_mtu		= e100_change_mtu,
2744	.ndo_do_ioctl		= e100_do_ioctl,
2745	.ndo_tx_timeout		= e100_tx_timeout,
2746#ifdef CONFIG_NET_POLL_CONTROLLER
2747	.ndo_poll_controller	= e100_netpoll,
2748#endif
2749};
2750
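/*
 * e100_probe - PCI probe.
 *
 * Allocates the netdev, maps the CSR (BAR 1 in i/o mode, else BAR 0),
 * resets the chip *before* pci_set_master() in case an interrupt is
 * already pending, loads the EEPROM for the MAC address and feature
 * flags, then registers the net device and creates the CB DMA pool.
 * Failures unwind through the err_out_* labels in reverse order of
 * acquisition.
 */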
2751static int __devinit e100_probe(struct pci_dev *pdev,
2752	const struct pci_device_id *ent)
2753{
2754	struct net_device *netdev;
2755	struct nic *nic;
2756	int err;
2757
2758	if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2759		if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2760			pr_err("Etherdev alloc failed, aborting\n");
2761		return -ENOMEM;
2762	}
2763
2764	netdev->netdev_ops = &e100_netdev_ops;
2765	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2766	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2767	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2768
2769	nic = netdev_priv(netdev);
2770	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2771	nic->netdev = netdev;
2772	nic->pdev = pdev;
2773	nic->msg_enable = (1 << debug) - 1;
2774	nic->mdio_ctrl = mdio_ctrl_hw;
2775	pci_set_drvdata(pdev, netdev);
2776
2777	if ((err = pci_enable_device(pdev))) {
2778		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2779		goto err_out_free_dev;
2780	}
2781
2782	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2783		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2784		err = -ENODEV;
2785		goto err_out_disable_pdev;
2786	}
2787
2788	if ((err = pci_request_regions(pdev, DRV_NAME))) {
2789		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2790		goto err_out_disable_pdev;
2791	}
2792
2793	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2794		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2795		goto err_out_free_res;
2796	}
2797
2798	SET_NETDEV_DEV(netdev, &pdev->dev);
2799
2800	if (use_io)
2801		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
2802
2803	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2804	if (!nic->csr) {
2805		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2806		err = -ENOMEM;
2807		goto err_out_free_res;
2808	}
2809
2810	if (ent->driver_data)
2811		nic->flags |= ich;
2812	else
2813		nic->flags &= ~ich;
2814
2815	e100_get_defaults(nic);
2816
2817	/* locks must be initialized before calling hw_reset */
2818	spin_lock_init(&nic->cb_lock);
2819	spin_lock_init(&nic->cmd_lock);
2820	spin_lock_init(&nic->mdio_lock);
2821
2822	/* Reset the device before pci_set_master() in case device is in some
2823	 * funky state and has an interrupt pending - hint: we don't have the
2824	 * interrupt handler registered yet. */
2825	e100_hw_reset(nic);
2826
2827	pci_set_master(pdev);
2828
2829	init_timer(&nic->watchdog);
2830	nic->watchdog.function = e100_watchdog;
2831	nic->watchdog.data = (unsigned long)nic;
2832
2833	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2834
2835	if ((err = e100_alloc(nic))) {
2836		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2837		goto err_out_iounmap;
2838	}
2839
2840	if ((err = e100_eeprom_load(nic)))
2841		goto err_out_free;
2842
2843	e100_phy_init(nic);
2844
2845	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2846	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2847	if (!is_valid_ether_addr(netdev->perm_addr)) {
2848		if (!eeprom_bad_csum_allow) {
2849			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2850			err = -EAGAIN;
2851			goto err_out_free;
2852		} else {
2853			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2854		}
2855	}
2856
2857	/* WoL magic packet can be enabled from EEPROM */
2858	if ((nic->mac >= mac_82558_D101_A4) &&
2859	   (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2860		nic->flags |= wol_magic;
2861		device_set_wakeup_enable(&pdev->dev, true);
2862	}
2863
2864	/* ack any pending wake events, disable PME */
2865	pci_pme_active(pdev, false);
2866
2867	strcpy(netdev->name, "eth%d");
2868	if ((err = register_netdev(netdev))) {
2869		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2870		goto err_out_free;
2871	}
2872	nic->cbs_pool = pci_pool_create(netdev->name,
2873			   nic->pdev,
2874			   nic->params.cbs.max * sizeof(struct cb),
2875			   sizeof(u32),
2876			   0);
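	/* note: nic->cbs_pool is not checked for failure here; if the
	 * pool allocation ever failed, e100_alloc_cbs() would oops on
	 * the NULL pool at open time */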
2877	netif_info(nic, probe, nic->netdev,
2878		   "addr 0x%llx, irq %d, MAC addr %pM\n",
2879		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2880		   pdev->irq, netdev->dev_addr);
2881
2882	return 0;
2883
2884err_out_free:
2885	e100_free(nic);
2886err_out_iounmap:
2887	pci_iounmap(pdev, nic->csr);
2888err_out_free_res:
2889	pci_release_regions(pdev);
2890err_out_disable_pdev:
2891	pci_disable_device(pdev);
2892err_out_free_dev:
2893	pci_set_drvdata(pdev, NULL);
2894	free_netdev(netdev);
2895	return err;
2896}
2897
2898static void __devexit e100_remove(struct pci_dev *pdev)
2899{
2900	struct net_device *netdev = pci_get_drvdata(pdev);
2901
2902	if (netdev) {
2903		struct nic *nic = netdev_priv(netdev);
2904		unregister_netdev(netdev);
2905		e100_free(nic);
2906		pci_iounmap(pdev, nic->csr);
2907		pci_pool_destroy(nic->cbs_pool);
2908		free_netdev(netdev);
2909		pci_release_regions(pdev);
2910		pci_disable_device(pdev);
2911		pci_set_drvdata(pdev, NULL);
2912	}
2913}
2914
2915#define E100_82552_SMARTSPEED   0x14   /* SmartSpeed Ctrl register */
2916#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
2917#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
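/*
 * __e100_shutdown - common suspend/shutdown tail.
 *
 * If wake-on-LAN magic packets or ASF management traffic are in use,
 * *enable_wake is set so the caller keeps PME armed; on an 82552 the
 * PHY is also told to start reverse auto-negotiation, presumably
 * renegotiating a lower link speed to save power while the host
 * sleeps (e100_resume() undoes this).
 */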
2918static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
2919{
2920	struct net_device *netdev = pci_get_drvdata(pdev);
2921	struct nic *nic = netdev_priv(netdev);
2922
2923	if (netif_running(netdev))
2924		e100_down(nic);
2925	netif_device_detach(netdev);
2926
2927	pci_save_state(pdev);
2928
2929	if ((nic->flags & wol_magic) | e100_asf(nic)) {
2930		/* enable reverse auto-negotiation */
2931		if (nic->phy == phy_82552_v) {
2932			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2933			                           E100_82552_SMARTSPEED);
2934
2935			mdio_write(netdev, nic->mii.phy_id,
2936			           E100_82552_SMARTSPEED, smartspeed |
2937			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
2938		}
2939		*enable_wake = true;
2940	} else {
2941		*enable_wake = false;
2942	}
2943
2944	pci_disable_device(pdev);
2945}
2946
2947static int __e100_power_off(struct pci_dev *pdev, bool wake)
2948{
2949	if (wake)
2950		return pci_prepare_to_sleep(pdev);
2951
2952	pci_wake_from_d3(pdev, false);
2953	pci_set_power_state(pdev, PCI_D3hot);
2954
2955	return 0;
2956}
2957
2958#ifdef CONFIG_PM
2959static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2960{
2961	bool wake;
2962	__e100_shutdown(pdev, &wake);
2963	return __e100_power_off(pdev, wake);
2964}
2965
2966static int e100_resume(struct pci_dev *pdev)
2967{
2968	struct net_device *netdev = pci_get_drvdata(pdev);
2969	struct nic *nic = netdev_priv(netdev);
2970
2971	pci_set_power_state(pdev, PCI_D0);
2972	pci_restore_state(pdev);
2973	/* ack any pending wake events, disable PME */
2974	pci_enable_wake(pdev, 0, 0);
2975
2976	/* disable reverse auto-negotiation */
2977	if (nic->phy == phy_82552_v) {
2978		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
2979		                           E100_82552_SMARTSPEED);
2980
2981		mdio_write(netdev, nic->mii.phy_id,
2982		           E100_82552_SMARTSPEED,
2983		           smartspeed & ~(E100_82552_REV_ANEG));
2984	}
2985
2986	netif_device_attach(netdev);
2987	if (netif_running(netdev))
2988		e100_up(nic);
2989
2990	return 0;
2991}
2992#endif /* CONFIG_PM */
2993
2994static void e100_shutdown(struct pci_dev *pdev)
2995{
2996	bool wake;
2997	__e100_shutdown(pdev, &wake);
2998	if (system_state == SYSTEM_POWER_OFF)
2999		__e100_power_off(pdev, wake);
3000}
3001
3002/* ------------------ PCI Error Recovery infrastructure  -------------- */
3003/**
3004 * e100_io_error_detected - called when PCI error is detected.
3005 * @pdev: Pointer to PCI device
3006 * @state: The current pci connection state
3007 */
3008static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3009{
3010	struct net_device *netdev = pci_get_drvdata(pdev);
3011	struct nic *nic = netdev_priv(netdev);
3012
3013	netif_device_detach(netdev);
3014
3015	if (state == pci_channel_io_perm_failure)
3016		return PCI_ERS_RESULT_DISCONNECT;
3017
3018	if (netif_running(netdev))
3019		e100_down(nic);
3020	pci_disable_device(pdev);
3021
3022	/* Request a slot reset. */
3023	return PCI_ERS_RESULT_NEED_RESET;
3024}
3025
3026/**
3027 * e100_io_slot_reset - called after the pci bus has been reset.
3028 * @pdev: Pointer to PCI device
3029 *
3030 * Restart the card from scratch.
3031 */
3032static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3033{
3034	struct net_device *netdev = pci_get_drvdata(pdev);
3035	struct nic *nic = netdev_priv(netdev);
3036
3037	if (pci_enable_device(pdev)) {
3038		pr_err("Cannot re-enable PCI device after reset\n");
3039		return PCI_ERS_RESULT_DISCONNECT;
3040	}
3041	pci_set_master(pdev);
3042
3043	/* Only one device per card can do a reset */
3044	if (0 != PCI_FUNC(pdev->devfn))
3045		return PCI_ERS_RESULT_RECOVERED;
3046	e100_hw_reset(nic);
3047	e100_phy_init(nic);
3048
3049	return PCI_ERS_RESULT_RECOVERED;
3050}
3051
3052/**
3053 * e100_io_resume - resume normal operations
3054 * @pdev: Pointer to PCI device
3055 *
3056 * Resume normal operations after an error recovery
3057 * sequence has been completed.
3058 */
3059static void e100_io_resume(struct pci_dev *pdev)
3060{
3061	struct net_device *netdev = pci_get_drvdata(pdev);
3062	struct nic *nic = netdev_priv(netdev);
3063
3064	/* ack any pending wake events, disable PME */
3065	pci_enable_wake(pdev, 0, 0);
3066
3067	netif_device_attach(netdev);
3068	if (netif_running(netdev)) {
3069		e100_open(netdev);
3070		mod_timer(&nic->watchdog, jiffies);
3071	}
3072}
3073
3074static struct pci_error_handlers e100_err_handler = {
3075	.error_detected = e100_io_error_detected,
3076	.slot_reset = e100_io_slot_reset,
3077	.resume = e100_io_resume,
3078};
3079
3080static struct pci_driver e100_driver = {
3081	.name =         DRV_NAME,
3082	.id_table =     e100_id_table,
3083	.probe =        e100_probe,
3084	.remove =       __devexit_p(e100_remove),
3085#ifdef CONFIG_PM
3086	/* Power Management hooks */
3087	.suspend =      e100_suspend,
3088	.resume =       e100_resume,
3089#endif
3090	.shutdown =     e100_shutdown,
3091	.err_handler = &e100_err_handler,
3092};
3093
3094static int __init e100_init_module(void)
3095{
3096	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3097		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3098		pr_info("%s\n", DRV_COPYRIGHT);
3099	}
3100	return pci_register_driver(&e100_driver);
3101}
3102
3103static void __exit e100_cleanup_module(void)
3104{
3105	pci_unregister_driver(&e100_driver);
3106}
3107
3108module_init(e100_init_module);
3109module_exit(e100_cleanup_module);