// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/*
 *	e100.c: Intel(R) PRO/100 ethernet driver
 *
 *	(Re)written 2003 by scott.feldman@intel.com.  Based loosely on
 *	original e100 driver, but better described as a munging of
 *	e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 *	References:
 *	Intel 8255x 10/100 Mbps Ethernet Controller Family,
 *	Open Source Software Developers Manual,
 *	http://sourceforge.net/projects/e1000
 *
 *
 *	                     Theory of Operation
 *
 *	I.   General
 *
 *	The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 *	controller family, which includes the 82557, 82558, 82559, 82550,
 *	82551, and 82562 devices.  82558 and greater controllers
 *	integrate the Intel 82555 PHY.  The controllers are used in
 *	server and client network interface cards, as well as in
 *	LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 *	configurations.  8255x supports a 32-bit linear addressing
 *	mode and operates at a 33 MHz PCI clock rate.
 *
 *	II.  Driver Operation
 *
 *	Memory-mapped mode is used exclusively to access the device's
 *	shared-memory structure, the Control/Status Registers (CSR).  All
 *	setup, configuration, and control of the device, including queuing
 *	of Tx, Rx, and configuration commands, is through the CSR.
 *	cmd_lock serializes accesses to the CSR command register.  cb_lock
 *	protects the shared Command Block List (CBL).
 *
 *	8255x is highly MII-compliant and all access to the PHY goes
 *	through the Management Data Interface (MDI).  Consequently, the
 *	driver leverages the mii.c library shared with other MII-compliant
 *	devices.
 *
 *	Big- and Little-Endian byte order as well as 32- and 64-bit
 *	archs are supported.  Weak-ordered memory and non-cache-coherent
 *	archs are supported.
 *
 *	III. Transmit
 *
 *	A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 *	together in a fixed-size ring (CBL) thus forming the flexible mode
 *	memory structure.  A TCB marked with the suspend-bit indicates
 *	the end of the ring.  The last TCB processed suspends the
 *	controller, and the controller can be restarted by issuing a CU
 *	resume command to continue from the suspend point, or a CU start
 *	command to start at a given position in the ring.
 *
 *	Non-Tx commands (config, multicast setup, etc) are linked
 *	into the CBL ring along with Tx commands.  The common structure
 *	used for both Tx and non-Tx commands is the Command Block (CB).
 *
 *	cb_to_use is the next CB to use for queuing a command; cb_to_clean
 *	is the next CB to check for completion; cb_to_send is the first
 *	CB to start on in case of a previous failure to resume.  CB clean
 *	up happens in interrupt context in response to a CU interrupt.
 *	cbs_avail keeps track of the number of free CB resources available.
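 *
 *	A minimal sketch of that clean-up walk (illustrative only; the
 *	field names match the structures defined later in this file, but
 *	the real clean-up routine differs in detail):
 *
 *		while (cb_to_clean->status & cpu_to_le16(cb_complete)) {
 *			... unmap and free cb_to_clean->skb, if any ...
 *			cb_to_clean->status = 0;
 *			cbs_avail++;
 *			cb_to_clean = cb_to_clean->next;
 *		}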
 *
 *	Hardware padding of short packets to the minimum packet size is
 *	enabled.  82557 pads with 7Eh, while the later controllers pad
 *	with 00h.
 *
 *	IV.  Receive
 *
 *	The Receive Frame Area (RFA) comprises a ring of Receive Frame
 *	Descriptors (RFD) + data buffer, thus forming the simplified mode
 *	memory structure.  Rx skbs are allocated to contain both the RFD
 *	and the data buffer, but the RFD is pulled off before the skb is
 *	indicated.  The data buffer is aligned such that encapsulated
 *	protocol headers are u32-aligned.  Since the RFD is part of the
 *	mapped shared memory, and completion status is contained within
 *	the RFD, the RFD must be dma_sync'ed to maintain a consistent
 *	view from software and hardware.
 *
 *	In order to keep updates to the RFD link field from colliding with
 *	hardware writes to mark packets complete, we use the fact that
 *	hardware will not write to a size-0 descriptor, and we mark the
 *	previous packet as end-of-list (EL).  After updating the link, we
 *	remove EL and only then restore the size such that hardware may
 *	use the previous-to-end RFD.
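 *
 *	One ordering consistent with the description above, sketched as
 *	illustrative pseudo-code (new_rfd, last_rfd, new_rfd_dma, and
 *	buf_len are made-up names, not the actual receive-path code):
 *
 *		new_rfd->size = 0;                        hw ignores it
 *		last_rfd->link = cpu_to_le32(new_rfd_dma);
 *		last_rfd->command &= ~cpu_to_le16(cb_el);
 *		new_rfd->size = cpu_to_le16(buf_len);     now hw may use it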
 *
 *	Under typical operation, the receive unit (RU) is started once,
 *	and the controller happily fills RFDs as frames arrive.  If
 *	replacement RFDs cannot be allocated, or the RU goes non-active,
 *	the RU must be restarted.  Frame arrival generates an interrupt,
 *	and Rx indication and re-allocation happen in the same context,
 *	therefore no locking is required.  A software-generated interrupt
 *	is issued from the watchdog to recover from a failed-allocation
 *	scenario where all Rx resources have been indicated and none
 *	replaced.
 *
 *	V.   Miscellaneous
 *
 *	VLAN offloading of tagging, stripping and filtering is not
 *	supported, but the driver will accommodate the extra 4-byte VLAN
 *	tag for processing by upper layers.  Tx/Rx Checksum offloading is
 *	not supported.  Tx Scatter/Gather is not supported.  Jumbo Frames
 *	are not supported (hardware limitation).
 *
 *	MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 *	Thanks to JC (jchapman@katalix.com) for helping with
 *	testing/troubleshooting the development driver.
 *
 *	TODO:
 *	o several entry points race with dev->close
 *	o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 *	FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>

#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2" DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
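
/* Example module load (illustrative; the parameters are the three
 * module_param()s declared above):
 *
 *	modprobe e100 debug=16 use_io=1
 *
 * enables maximum debug verbosity and forces I/O-mapped register access
 * instead of the default memory-mapped mode.
 */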

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res       = 0x08,
	rus_ready        = 0x10,
	rus_mask         = 0x3C,
};

enum ru_state  {
	RU_SUSPENDED     = 0,
	RU_RUNNING       = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
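
/* For example, with little-endian bitfield layout the first line of
 * struct config below,
 *
 *	u8 X(byte_count:6, pad0:2);
 *
 * expands to "u8 byte_count:6, pad0:2;"; on big-endian bitfield layouts
 * the two declarators are swapped, keeping each field at the bit
 * positions the hardware expects.
 */
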
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct dma_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
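
/* Worked example of the checksum convention above (illustrative values):
 * if the first eeprom_wc - 1 words sum to 0x1234, the final word must
 * hold 0xBABA - 0x1234 = 0xA886, so that the 16-bit sum over all words
 * comes out to 0xBABA.
 */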

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	dma_wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return  BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return	BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return	ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);
	return 0;
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
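
/* For example, a build that keeps the ucode loaded for its bug fixes but
 * effectively disables CPUSaver bundling (per the note above) would use:
 *
 *	#define BUNDLESMALL 0
 *	#define BUNDLEMAX (u16)1
 *	#define INTDELAY (u16)1
 */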

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 *
	 *    "fixes for bugs in the B-step hardware (specifically, bugs
	 *     with Inline Receive)."
	 *
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static int e100_setup_ucode(struct nic *nic, struct cb *cb,
			    struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter)
			break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
	return 0;
}

static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
	return 0;
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
				nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
	return 0;
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}
1713
1714static void e100_watchdog(struct timer_list *t)
1715{
1716 struct nic *nic = from_timer(nic, t, watchdog);
1717 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1718 u32 speed;
1719
1720 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1721 "right now = %ld\n", jiffies);
1722
1723 /* mii library handles link maintenance tasks */
1724
1725 mii_ethtool_gset(&nic->mii, &cmd);
1726 speed = ethtool_cmd_speed(&cmd);
1727
1728 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1729 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1730 speed == SPEED_100 ? 100 : 10,
1731 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1732 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1733 netdev_info(nic->netdev, "NIC Link is Down\n");
1734 }
1735
1736 mii_check_link(&nic->mii);
1737
1738 /* Software generated interrupt to recover from (rare) Rx
1739 * allocation failure.
1740 * Unfortunately have to use a spinlock to not re-enable interrupts
1741 * accidentally, due to hardware that shares a register between the
1742 * interrupt mask bit and the SW Interrupt generation bit */
1743 spin_lock_irq(&nic->cmd_lock);
1744 iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
1745 e100_write_flush(nic);
1746 spin_unlock_irq(&nic->cmd_lock);
1747
1748 e100_update_stats(nic);
1749 e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1750
1751 if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to work around a 557 lock-up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	dma_addr_t dma_addr;
	cb->command = nic->tx_command;

	dma_addr = pci_map_single(nic->pdev,
				  skb->data, skb->len, PCI_DMA_TODEVICE);
	/* If we can't map the skb, have the upper layer try later */
	if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
		dev_kfree_skb_any(skb);
		skb = NULL;
		return -ENOMEM;
	}

	/*
	 * Use the last 4 bytes of the skb payload as the CRC; this is
	 * used for testing, i.e. sending frames with a bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~cpu_to_le16(cb_tx_nc);

	/* Interrupt every 16 packets (whenever cbs_avail is a multiple
	 * of 16, i.e. its low four bits are clear) regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
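	/* Fill out the TCB: the TBD array lives inline in the CB, and a
	 * single TBD covers the whole linear skb (Tx scatter/gather is
	 * not supported). */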
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
	return 0;
}

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	     cb->status & cpu_to_le16(cb_complete);
	     cb = nic->cb_to_clean = cb->next) {
		dma_rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void *)cb - (void *)nic->cbs) /
				   sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
				   &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;

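	/* cb->next/cb->prev make the ring walkable from the CPU side, while
	 * cb->link expresses the same ring in bus addresses so the CU can
	 * chain from one command block to the next on its own. */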
	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
				       ((i + 1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)
		return;
	if (RU_SUSPENDED != nic->ru_running)
		return;

	/* handle init time starts */
	if (!rx)
		rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
				      RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before-last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	}

	return 0;
}

static int e100_rx_indicate(struct nic *nic, struct rx *rx,
			    unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	dma_rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If the next buffer has the el bit, but we think the receiver
		 * is still running, check to see if it really stopped while
		 * we had interrupts off.
		 * This allows for a fast restart without re-enabling
		 * interrupts */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running)) {
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		}
		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
					       sizeof(struct rfd),
					       PCI_DMA_FROMDEVICE);
		return -ENODATA;
	}

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
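	/* The low 14 bits of actual_size hold the byte count; the top two
	 * bits are the EOF and F status flags, hence the 0x3FFF mask. */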
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
			 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
			  unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On -EAGAIN we hit the quota, so there is more work to do;
	 * restart only once cleanup is complete.  Otherwise, if the RU is
	 * already suspended (RNR), restart it now.  This ensures the state
	 * machine never starts the receiver with a partially cleaned list,
	 * avoiding a race between hardware and rx_to_clean when in
	 * NAPI mode. */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* Ack the RNR condition before restarting the receiver */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}

static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);
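	/* Tx cleanup is cheap and not bounded by the budget; only Rx
	 * work counts toward it. */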

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		e100_enable_irq(nic);
	}

	return work_done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
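	/* True only for ASF-capable devices (IDs 0x1050-0x1057) with the
	 * ASF enable bit set, the GCL bit clear, and an SMBus address
	 * other than 0xFE. */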
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
		(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
		!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
		((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
			       nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			   BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
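	/* Transmit one all-0xFF pattern frame and compare it against the
	 * first received buffer once the loop completes. */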
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
		   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LEDTX and LED_RX both off */

static int e100_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&nic->mii, cmd);

	return 0;
}

static int e100_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
			  struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
2476 regs->version = (1 << 24) | nic->pdev->revision;
2477 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2478 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2479 ioread16(&nic->csr->scb.status);
2480 for (i = E100_PHY_REGS; i >= 0; i--)
2481 buff[1 + E100_PHY_REGS - i] =
2482 mdio_read(netdev, nic->mii.phy_id, i);
2483 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2484 e100_exec_cb(nic, NULL, e100_dump);
2485 msleep(10);
2486 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2487 sizeof(nic->mem->dump_buf));
2488}
2489
2490static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2491{
2492 struct nic *nic = netdev_priv(netdev);
2493 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2494 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2495}
2496
2497static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2498{
2499 struct nic *nic = netdev_priv(netdev);
2500
2501 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2502 !device_can_wakeup(&nic->pdev->dev))
2503 return -EOPNOTSUPP;
2504
2505 if (wol->wolopts)
2506 nic->flags |= wol_magic;
2507 else
2508 nic->flags &= ~wol_magic;
2509
2510 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2511
2512 e100_exec_cb(nic, NULL, e100_configure);
2513
2514 return 0;
2515}
2516
2517static u32 e100_get_msglevel(struct net_device *netdev)
2518{
2519 struct nic *nic = netdev_priv(netdev);
2520 return nic->msg_enable;
2521}
2522
2523static void e100_set_msglevel(struct net_device *netdev, u32 value)
2524{
2525 struct nic *nic = netdev_priv(netdev);
2526 nic->msg_enable = value;
2527}
2528
2529static int e100_nway_reset(struct net_device *netdev)
2530{
2531 struct nic *nic = netdev_priv(netdev);
2532 return mii_nway_restart(&nic->mii);
2533}
2534
2535static u32 e100_get_link(struct net_device *netdev)
2536{
2537 struct nic *nic = netdev_priv(netdev);
2538 return mii_link_ok(&nic->mii);
2539}
2540
2541static int e100_get_eeprom_len(struct net_device *netdev)
2542{
2543 struct nic *nic = netdev_priv(netdev);
2544 return nic->eeprom_wc << 1;
2545}
2546
2547#define E100_EEPROM_MAGIC 0x1234
2548static int e100_get_eeprom(struct net_device *netdev,
2549 struct ethtool_eeprom *eeprom, u8 *bytes)
2550{
2551 struct nic *nic = netdev_priv(netdev);
2552
2553 eeprom->magic = E100_EEPROM_MAGIC;
2554 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2555
2556 return 0;
2557}
2558
2559static int e100_set_eeprom(struct net_device *netdev,
2560 struct ethtool_eeprom *eeprom, u8 *bytes)
2561{
2562 struct nic *nic = netdev_priv(netdev);
2563
2564 if (eeprom->magic != E100_EEPROM_MAGIC)
2565 return -EINVAL;
2566
2567 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2568
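	/* ethtool offsets/lengths are in bytes, but the EEPROM is addressed
	 * in 16-bit words; round up so a partial trailing word is still
	 * written back. */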
	return e100_eeprom_save(nic, eeprom->offset >> 1,
				(eeprom->len >> 1) + 1);
}

static void e100_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test (on/offline)",
	"Eeprom test (on/offline)",
	"Self test (offline)",
	"Mac loopback (offline)",
	"Phy loopback (offline)",
};
#define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
			   struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

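	/* Give the link time to come back up after the offline tests */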
	msleep_interruptible(4 * 1000);
}

static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

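	/* Device-specific counters follow the 21 standard netdev stats,
	 * in the same order as e100_gstrings_stats above. */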
	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= e100_get_link_ksettings,
	.set_link_ksettings	= e100_set_link_ksettings,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
					&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
				    nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};

static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	netdev->ethtool_ops = &e100_ethtool_ops;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
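	/* A debug level of N enables the lowest N NETIF_MSG_* classes */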
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	timer_setup(&nic->watchdog, e100_watchdog, 0);

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = dma_pool_create(netdev->name,
					&nic->pdev->dev,
					nic->params.cbs.max * sizeof(struct cb),
					sizeof(u32),
					0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}

static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		dma_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#define E100_82552_SMARTSPEED	0x14	/* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200	/* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400	/* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) || e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
						   E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				   E100_82552_SMARTSPEED, smartspeed |
				   E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_clear_master(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
					   E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			   E100_82552_SMARTSPEED,
			   smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = e100_remove,
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);
1/*******************************************************************************
2
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
97 * IV. Receive
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * In order to keep updates to the RFD link field from colliding with
110 * hardware writes to mark packets complete, we use the feature that
111 * hardware will not write to a size 0 descriptor and mark the previous
112 * packet as end-of-list (EL). After updating the link, we remove EL
113 * and only then restore the size such that hardware may use the
114 * previous-to-end RFD.
115 *
116 * Under typical operation, the receive unit (RU) is start once,
117 * and the controller happily fills RFDs as frames arrive. If
118 * replacement RFDs cannot be allocated, or the RU goes non-active,
119 * the RU must be restarted. Frame arrival generates an interrupt,
120 * and Rx indication and re-allocation happen in the same context,
121 * therefore no locking is required. A software-generated interrupt
122 * is generated from the watchdog to recover from a failed allocation
123 * scenario where all Rx resources have been indicated and none re-
124 * placed.
125 *
126 * V. Miscellaneous
127 *
128 * VLAN offloading of tagging, stripping and filtering is not
129 * supported, but driver will accommodate the extra 4-byte VLAN tag
130 * for processing by upper layers. Tx/Rx Checksum offloading is not
131 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
132 * not supported (hardware limitation).
133 *
134 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
135 *
136 * Thanks to JC (jchapman@katalix.com) for helping with
137 * testing/troubleshooting the development driver.
138 *
139 * TODO:
140 * o several entry points race with dev->close
141 * o check for tx-no-resources/stop Q races with tx clean/wake Q
142 *
143 * FIXES:
144 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
145 * - Stratus87247: protect MDI control register manipulations
146 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
148 */
149
150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
152#include <linux/hardirq.h>
153#include <linux/interrupt.h>
154#include <linux/module.h>
155#include <linux/moduleparam.h>
156#include <linux/kernel.h>
157#include <linux/types.h>
158#include <linux/sched.h>
159#include <linux/slab.h>
160#include <linux/delay.h>
161#include <linux/init.h>
162#include <linux/pci.h>
163#include <linux/dma-mapping.h>
164#include <linux/dmapool.h>
165#include <linux/netdevice.h>
166#include <linux/etherdevice.h>
167#include <linux/mii.h>
168#include <linux/if_vlan.h>
169#include <linux/skbuff.h>
170#include <linux/ethtool.h>
171#include <linux/string.h>
172#include <linux/firmware.h>
173#include <linux/rtnetlink.h>
174#include <asm/unaligned.h>
175
176
177#define DRV_NAME "e100"
178#define DRV_EXT "-NAPI"
179#define DRV_VERSION "3.5.24-k2"DRV_EXT
180#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
181#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
182
183#define E100_WATCHDOG_PERIOD (2 * HZ)
184#define E100_NAPI_WEIGHT 16
185
186#define FIRMWARE_D101M "e100/d101m_ucode.bin"
187#define FIRMWARE_D101S "e100/d101s_ucode.bin"
188#define FIRMWARE_D102E "e100/d102e_ucode.bin"
189
190MODULE_DESCRIPTION(DRV_DESCRIPTION);
191MODULE_AUTHOR(DRV_COPYRIGHT);
192MODULE_LICENSE("GPL");
193MODULE_VERSION(DRV_VERSION);
194MODULE_FIRMWARE(FIRMWARE_D101M);
195MODULE_FIRMWARE(FIRMWARE_D101S);
196MODULE_FIRMWARE(FIRMWARE_D102E);
197
198static int debug = 3;
199static int eeprom_bad_csum_allow = 0;
200static int use_io = 0;
201module_param(debug, int, 0);
202module_param(eeprom_bad_csum_allow, int, 0);
203module_param(use_io, int, 0);
204MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
205MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
206MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
207
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A = 0,
	mac_82557_D100_B = 1,
	mac_82557_D100_C = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M = 8,
	mac_82559_D101S = 9,
	mac_82550_D102 = 12,
	mac_82550_D102_C = 13,
	mac_82551_E = 14,
	mac_82551_F = 15,
	mac_82551_10 = 16,
	mac_unknown = 0xFF,
};

enum phy {
	phy_100a = 0x000003E0,
	phy_100c = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v = 0xd061004d,
	phy_unknown = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready = 0x10,
	rus_mask = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours = 0x00,
	stat_ack_sw_gen = 0x04,
	stat_ack_rnr = 0x10,
	stat_ack_cu_idle = 0x20,
	stat_ack_frame_rx = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all = 0x01,
	irq_sw_gen = 0x02,
};

enum scb_cmd_lo {
	cuc_nop = 0x00,
	ruc_start = 0x01,
	ruc_load_base = 0x06,
	cuc_start = 0x10,
	cuc_resume = 0x20,
	cuc_dump_addr = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset = 0x0000,
	selftest = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read = 0x06,
	op_ewds = 0x10,
	op_ewen = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix = 0x03,
	eeprom_phy_iface = 0x06,
	eeprom_id = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok = 0x2000,
};

/**
 * cb_command - Command Block flags
 * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
 */
enum cb_command {
	cb_nop = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi = 0x0003,
	cb_tx = 0x0004,
	cb_ucode = 0x0005,
	cb_dump = 0x0006,
	cb_tx_sf = 0x0008,
	cb_tx_nc = 0x0010,
	cb_cid = 0x1f00,
	cb_i = 0x2000,
	cb_s = 0x4000,
	cb_el = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

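/* Big-endian machines allocate bitfields from the most significant bit of
 * each byte, the reverse of little-endian ones, so the X(a,b) helper below
 * swaps the declaration order of each bitfield pair to keep struct config
 * matching the byte layout the device expects. */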
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable ____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs ____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock ____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich = (1 << 0),
		promiscuous = (1 << 1),
		multicast_all = (1 << 2),
		wol_magic = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags ____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

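/* The EEPROM is a small serial part (93C46/93C66-class Microwire devices
 * on most boards) wired to four CSR bits: eecs (chip select), eesk (clock),
 * eedi (data in) and eedo (data out).  Writing a word means clocking out,
 * MSB first, a write-enable command, then opcode+address+data, then a
 * write-disable command, toggling eesk around each eedi bit. */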
static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}
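
/* Example of the address-length probe above: a read of word 0 is started
 * assuming an 8-bit address.  The EEPROM answers the last address bit with
 * a dummy zero on EEDO; if that zero shows up before bit 16, the part has
 * a shorter address (e.g. 6 bits on a 64-word device), so *addr_len is
 * trimmed and the remaining 16 clock cycles shift out the data bits. */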

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
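/* The SCB command register is a one-deep mailbox: hardware clears cmd_lo
 * once it has accepted the previous command.  e100_exec_cmd() below spins
 * until that happens (backing off with small delays after the first few
 * iterations), loads the general pointer when the command takes one
 * (cuc_resume does not), and then writes the command byte itself. */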
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
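
/* Note that non-Tx commands share this path: e100_exec_cb(nic, NULL,
 * e100_configure), for example, queues a configure block, with the
 * cb_prepare callback filling in the CB body before the S-bit handoff
 * above makes it visible to the hardware. */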

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
						MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			return BMSR_LSTATUS /* for mii_link_ok() */ |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */
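
/* Worked example of the patching done in e100_setup_ucode() below: each
 * tunable is the 16-bit immediate of a "move immediate" microcode
 * instruction, so installing INTDELAY is a mask-and-or on one word:
 *
 *	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
 *	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
 *
 * and likewise for BUNDLEMAX and the BUNDLESMALL mask. */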

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision */
	if (nic->mac == mac_82559_D101M)
		fw_name = FIRMWARE_D101M;
	else if (nic->mac == mac_82559_D101S)
		fw_name = FIRMWARE_D101S;
	else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
		fw_name = FIRMWARE_D102E;
	else /* No ucode on other devices */
		return NULL;

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		netif_err(nic, probe, nic->netdev,
			  "Failed to load firmware \"%s\": %d\n",
			  fw_name, err);
		return ERR_PTR(err);
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	 * indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	 * will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter)
			break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
			      struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503: /* Non-MII PHY; UNTESTED! */
	case S80C24: /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				   bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			   bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			   nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
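
/* Grow the inter-frame spacing when more than one collision is seen per 32
 * transmitted frames (in steps of 5, capped at 60) and shrink it again when
 * traffic falls below the per-interval minimum (1000 frames at 100 Mbps,
 * 100 at 10 Mbps), reconfiguring the device only when the value changes. */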
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
			      struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the skb payload as the frame CRC; this is
	 * for testing, i.e. sending frames with a deliberately bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
}

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		 * Issue a NOP command followed by a 1us delay before
		 * issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}

static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}

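/* The CB ring built below is one contiguous DMA allocation: next/prev
 * pointers make it a circular list for the driver, while each block's
 * little-endian link field holds the bus address of the ((i + 1) % count)'th
 * block, which is the pointer the CU actually follows. */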
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
				  &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;
	memset(nic->cbs, 0, count * sizeof(struct cb));

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}

static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
	if (!nic->rxs)
		return;
	if (RU_SUSPENDED != nic->ru_running)
		return;

	/* handle init time starts */
	if (!rx)
		rx = nic->rxs;

	/* (Re)start RU if suspended or idle and RFA is non-NULL */
	if (rx->skb) {
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
		nic->ru_running = RU_RUNNING;
	}
}

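/* Each Rx skb holds the RFD itself followed by room for a maximum-sized
 * VLAN-tagged Ethernet frame plus FCS, so the device can complete the
 * descriptor and deposit the frame into a single buffer. */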
1913#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
1914static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1915{
1916 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1917 return -ENOMEM;
1918
1919 /* Init, and map the RFD. */
1920 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1921 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1922 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1923
1924 if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1925 dev_kfree_skb_any(rx->skb);
1926 rx->skb = NULL;
1927 rx->dma_addr = 0;
1928 return -ENOMEM;
1929 }
1930
1931 /* Link the RFD to end of RFA by linking previous RFD to
1932 * this one. We are safe to touch the previous RFD because
1933 * it is protected by the before last buffer's el bit being set */
1934 if (rx->prev->skb) {
1935 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1936 put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1937 pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1938 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1939 }
1940
1941 return 0;
1942}
1943
1944static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1945 unsigned int *work_done, unsigned int work_to_do)
1946{
1947 struct net_device *dev = nic->netdev;
1948 struct sk_buff *skb = rx->skb;
1949 struct rfd *rfd = (struct rfd *)skb->data;
1950 u16 rfd_status, actual_size;
1951 u16 fcs_pad = 0;
1952
1953 if (unlikely(work_done && *work_done >= work_to_do))
1954 return -EAGAIN;
1955
1956 /* Need to sync before taking a peek at cb_complete bit */
1957 pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1958 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1959 rfd_status = le16_to_cpu(rfd->status);
1960
1961 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1962 "status=0x%04X\n", rfd_status);
1963 rmb(); /* read size after status bit */
1964
1965 /* If data isn't ready, nothing to indicate */
1966 if (unlikely(!(rfd_status & cb_complete))) {
1967 /* If the next buffer has the el bit, but we think the receiver
1968 * is still running, check to see if it really stopped while
1969 * we had interrupts off.
1970 * This allows for a fast restart without re-enabling
1971 * interrupts */
1972 if ((le16_to_cpu(rfd->command) & cb_el) &&
1973 (RU_RUNNING == nic->ru_running))
1974
1975 if (ioread8(&nic->csr->scb.status) & rus_no_res)
1976 nic->ru_running = RU_SUSPENDED;
1977 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1978 sizeof(struct rfd),
1979 PCI_DMA_FROMDEVICE);
1980 return -ENODATA;
1981 }

	/* Get actual data size */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	/* If this buffer has the el bit, but we think the receiver
	 * is still running, check to see if it really stopped while
	 * we had interrupts off.
	 * This allows for a fast restart without re-enabling interrupts.
	 * This can happen when the RU sees the size change but also sees
	 * the el bit set. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {
		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* If we are receiving all frames, then don't bother
	 * checking for errors.
	 */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* Received oversized frame, but keep it. */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
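
/* Editorial sketch: the completion check in e100_rx_indicate() relies on
 * a strict ordering contract between the controller and the CPU.  With
 * hypothetical locals, the consumer side reduces to:
 *
 *	pci_dma_sync_single_for_cpu(pdev, rfd_dma, sizeof(struct rfd),
 *		PCI_DMA_BIDIRECTIONAL);		// give the CPU a coherent view
 *	status = le16_to_cpu(rfd->status);
 *	if (!(status & cb_complete))
 *		return -ENODATA;		// RFD still owned by hardware
 *	rmb();					// order: status before size
 *	size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
 *
 * Without the rmb(), a weakly ordered CPU could read actual_size before
 * the complete bit and see a stale length from before writeback. */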

static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN we hit quota, so there is more work to do; restart once
	 * cleanup is complete.
	 * Otherwise, if the receiver is already in the RNR state, a restart
	 * is also required: this guarantees the state machine never starts
	 * the receiver with a partially cleaned list, avoiding a race
	 * between hardware and rx_to_clean when in NAPI mode */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last buffer.
		 * This lets us update the next pointer on the last buffer
		 * without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching this
		 * buffer.
		 * When the hardware hits the before last buffer with el-bit
		 * and size of 0, it will RNR interrupt, the RUS will go into
		 * the No Resources state.  It will not complete nor write to
		 * this buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			new_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);

		/* Now that we have a new stopping point, we can clear the old
		 * stopping point.  We must sync twice to get the proper
		 * ordering on the hardware side of things. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		pci_dma_sync_single_for_device(nic->pdev,
			old_before_last_rx->dma_addr, sizeof(struct rfd),
			PCI_DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the RNR interrupt and restart the receiver */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
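
/* A worked example of the moving stopping point above, assuming a small
 * 4-RFD ring (indices 0..3; '*' marks where hardware is fetching, 'el'
 * the el-bit/size-0 marker that sits at rx_to_use->prev->prev):
 *
 *	before refill:	[0]  [1*]  [2:el,0]  [3]
 *	after refill:	[0]  [1*]  [2]       [3:el,0]
 *
 * Because the marker is always two entries behind rx_to_use, the entry
 * after it can have its link pointer rewritten without racing the DMA
 * engine, and the three syncs arm the new marker before the old one is
 * disarmed, so hardware never sees a ring with no stopping point. */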

static void e100_rx_clean_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;

	nic->ru_running = RU_UNINITIALIZED;

	if (nic->rxs) {
		for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
			if (rx->skb) {
				pci_unmap_single(nic->pdev, rx->dma_addr,
					RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
				dev_kfree_skb(rx->skb);
			}
		}
		kfree(nic->rxs);
		nic->rxs = NULL;
	}

	nic->rx_to_use = nic->rx_to_clean = NULL;
}

static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
		return -ENOMEM;

	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}
	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * We set the size to 0 to prevent hardware from touching this buffer.
	 * When the hardware hits the before last buffer with el-bit and size
	 * of 0, it will RNR interrupt, the RU will go into the No Resources
	 * state.  It will not complete nor write to this buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
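
/* The next/prev assignments above stitch the flat kcalloc() array into
 * a circular doubly linked list.  A minimal standalone sketch of the
 * same idiom, with hypothetical types outside the driver:
 *
 *	struct ring_ent { struct ring_ent *next, *prev; };
 *
 *	static void ring_link(struct ring_ent *e, unsigned int count)
 *	{
 *		unsigned int i;
 *		for (i = 0; i < count; i++) {
 *			e[i].next = (i + 1 < count) ? &e[i + 1] : &e[0];
 *			e[i].prev = (i == 0) ? &e[count - 1] : &e[i - 1];
 *		}
 *	}
 *
 * With count >= 2, rxs->prev->prev is always well defined, which the
 * el-bit placement above depends on. */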

static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
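
/* Note on the handler above: it does no packet work itself.  The
 * napi_schedule_prep()/__napi_schedule() pair is the split form of
 * napi_schedule(); prep atomically claims the scheduled bit, so the
 * device interrupt is masked only on the CPU that actually won the
 * right to schedule the poll:
 *
 *	if (napi_schedule_prep(&nic->napi)) {
 *		e100_disable_irq(nic);		// e100_poll() will unmask
 *		__napi_schedule(&nic->napi);
 *	}
 *
 * Masking between the two calls prevents re-entry of this handler while
 * the poll is pending. */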

static int e100_poll(struct napi_struct *napi, int budget)
{
	struct nic *nic = container_of(napi, struct nic, napi);
	unsigned int work_done = 0;

	e100_rx_clean(nic, &work_done, budget);
	e100_tx_clean(nic);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		e100_enable_irq(nic);
	}

	return work_done;
}
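
/* e100_poll() follows the standard NAPI contract: consume at most
 * budget rx packets, and only when work_done < budget leave polling
 * mode and unmask the interrupt.  The generic skeleton, sketched with
 * hypothetical foo_* helpers:
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		unsigned int done = 0;
 *
 *		foo_clean_rx(&done, budget);	// bounded by budget
 *		foo_clean_tx();			// tx cleanup is not budgeted
 *		if (done < budget) {
 *			napi_complete(napi);
 *			foo_enable_irq();
 *		}
 *		return done;
 *	}
 *
 * Returning budget without napi_complete() keeps the device on the poll
 * list, so the softirq calls back in without another interrupt. */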

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif

static int e100_set_mac_address(struct net_device *netdev, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);

	return 0;
}

static int e100_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
		return -EINVAL;
	netdev->mtu = new_mtu;
	return 0;
}

static int e100_asf(struct nic *nic)
{
	/* ASF can be enabled from eeprom */
	return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
		(nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
		!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
		((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
}

static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}

static void e100_down(struct nic *nic)
{
	/* wait here for poll to complete */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}

static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}

static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
			RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	    skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}

#define MII_LED_CONTROL		0x1B
#define E100_82552_LED_OVERRIDE	0x19
#define E100_82552_LED_ON	0x000F /* LED_TX and LED_RX both on */
#define E100_82552_LED_OFF	0x000A /* LED_TX and LED_RX both off */

static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_ethtool_gset(&nic->mii, cmd);
}

static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_sset(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);

	return err;
}

static void e100_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *info)
{
	struct nic *nic = netdev_priv(netdev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
}

#define E100_PHY_REGS 0x1C
static int e100_get_regs_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* buff[0] holds an SCB snapshot, buff[1..1 + E100_PHY_REGS] the
	 * PHY registers, and the register dump buffer follows at
	 * buff[2 + E100_PHY_REGS], so size the whole blob in bytes */
	return (2 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
}

static void e100_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = E100_PHY_REGS; i >= 0; i--)
		buff[1 + E100_PHY_REGS - i] =
			mdio_read(netdev, nic->mii.phy_id, i);
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
		sizeof(nic->mem->dump_buf));
}

static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);
	wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
	wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
}

static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
		return -EOPNOTSUPP;

	if (wol->wolopts)
		nic->flags |= wol_magic;
	else
		nic->flags &= ~wol_magic;

	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);

	return 0;
}

static u32 e100_get_msglevel(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->msg_enable;
}

static void e100_set_msglevel(struct net_device *netdev, u32 value)
{
	struct nic *nic = netdev_priv(netdev);
	nic->msg_enable = value;
}

static int e100_nway_reset(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
}

static u32 e100_get_link(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
}

static int e100_get_eeprom_len(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->eeprom_wc << 1;
}

#define E100_EEPROM_MAGIC 0x1234
static int e100_get_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	eeprom->magic = E100_EEPROM_MAGIC;
	memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);

	return 0;
}

static int e100_set_eeprom(struct net_device *netdev,
	struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	return e100_eeprom_save(nic, eeprom->offset >> 1,
		(eeprom->len >> 1) + 1);
}
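
/* The EEPROM is word (16-bit) addressed internally: eeprom_wc is a word
 * count, hence the << 1 above when reporting a byte length, and the
 * >> 1 conversions when saving.  The "+ 1" in the word count covers a
 * byte range that straddles a word boundary.  Worked example:
 * offset = 3, len = 4 touches bytes 3..6, i.e. words 1..3; the call
 * becomes e100_eeprom_save(nic, 1, 3), which spans the whole range. */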

static void e100_get_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	ring->rx_max_pending = rfds->max;
	ring->tx_max_pending = cbs->max;
	ring->rx_pending = rfds->count;
	ring->tx_pending = cbs->count;
}

static int e100_set_ringparam(struct net_device *netdev,
	struct ethtool_ringparam *ring)
{
	struct nic *nic = netdev_priv(netdev);
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
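
/* The paired max()/min() calls above clamp the requested counts into
 * [min, max].  An equivalent formulation with the kernel's clamp()
 * macro (a sketch, assuming the operands share a type, which clamp()
 * enforces at compile time):
 *
 *	rfds->count = clamp(ring->rx_pending, rfds->min, rfds->max);
 *	cbs->count  = clamp(ring->tx_pending, cbs->min, cbs->max);
 */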

static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)

static void e100_diag_test(struct net_device *netdev,
	struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i, err;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
	data[1] = e100_eeprom_load(nic);
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		err = mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		err = mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	msleep_interruptible(4 * 1000);
}

static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
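
/* Returning 2 for ETHTOOL_ID_ACTIVE tells the ethtool core to drive the
 * blinking itself at two on/off cycles per second, calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF; the driver only flips the LED register.
 * From userspace this is exercised with, for example:
 *
 *	ethtool -p eth0 5	# blink the port LED for 5 seconds
 *
 * ETHTOOL_ID_INACTIVE then writes 0, releasing the LED override. */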

static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)

static int e100_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E100_TEST_LEN;
	case ETH_SS_STATS:
		return E100_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void e100_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
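
/* Three things must stay in sync in the stats code: the string table
 * above, E100_NET_STATS_LEN (21, the number of generic names), and the
 * order of the data[i++] assignments.  The generic half works because
 * struct net_device_stats is, in this era, a plain run of unsigned long
 * fields, so it can be walked as an array via the cast; appending a
 * device-specific stat means adding the name and the assignment at the
 * same position in both lists. */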

static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
		break;
	case ETH_SS_STATS:
		memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
		break;
	}
}

static const struct ethtool_ops e100_ethtool_ops = {
	.get_settings		= e100_get_settings,
	.set_settings		= e100_set_settings,
	.get_drvinfo		= e100_get_drvinfo,
	.get_regs_len		= e100_get_regs_len,
	.get_regs		= e100_get_regs,
	.get_wol		= e100_get_wol,
	.set_wol		= e100_set_wol,
	.get_msglevel		= e100_get_msglevel,
	.set_msglevel		= e100_set_msglevel,
	.nway_reset		= e100_nway_reset,
	.get_link		= e100_get_link,
	.get_eeprom_len		= e100_get_eeprom_len,
	.get_eeprom		= e100_get_eeprom,
	.set_eeprom		= e100_set_eeprom,
	.get_ringparam		= e100_get_ringparam,
	.set_ringparam		= e100_set_ringparam,
	.self_test		= e100_diag_test,
	.get_strings		= e100_get_strings,
	.set_phys_id		= e100_set_phys_id,
	.get_ethtool_stats	= e100_get_ethtool_stats,
	.get_sset_count		= e100_get_sset_count,
	.get_ts_info		= ethtool_op_get_ts_info,
};

static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
}

static int e100_alloc(struct nic *nic)
{
	nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
		&nic->dma_addr);
	return nic->mem ? 0 : -ENOMEM;
}

static void e100_free(struct nic *nic)
{
	if (nic->mem) {
		pci_free_consistent(nic->pdev, sizeof(struct mem),
			nic->mem, nic->dma_addr);
		nic->mem = NULL;
	}
}

static int e100_open(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
	return err;
}

static int e100_close(struct net_device *netdev)
{
	e100_down(netdev_priv(netdev));
	return 0;
}

static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 0;
}

static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_change_mtu		= e100_change_mtu,
	.ndo_do_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};

static int __devinit e100_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC doesn't allow rx of vlan packets with normal MTU */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case device is in some
	 * funky state and has an interrupt pending - hint: we don't have the
	 * interrupt handler registered yet. */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	init_timer(&nic->watchdog);
	nic->watchdog.function = e100_watchdog;
	nic->watchdog.data = (unsigned long)nic;

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
	memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = pci_pool_create(netdev->name,
			   nic->pdev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	if (!nic->cbs_pool) {
		/* the original code ignored this failure and would oops
		 * later when the pool was first used */
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	return err;
}

static void __devexit e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		pci_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#define E100_82552_SMARTSPEED	0x14   /* SmartSpeed Ctrl register */
#define E100_82552_REV_ANEG	0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW	0x0400 /* Auto-negotiate now */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (netif_running(netdev))
		e100_down(nic);
	netif_device_detach(netdev);

	pci_save_state(pdev);

	if ((nic->flags & wol_magic) || e100_asf(nic)) {
		/* enable reverse auto-negotiation */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				E100_82552_SMARTSPEED, smartspeed |
				E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}

static int __e100_power_off(struct pci_dev *pdev, bool wake)
{
	if (wake)
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
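
/* Power-off policy splits on whether a wake source is armed: with one
 * (WoL magic packet or ASF management firmware), pci_prepare_to_sleep()
 * arms PME and picks an appropriate low-power state the platform can
 * still wake from; without one, PME is left disabled and the device
 * goes straight to D3hot.  e100_suspend() and e100_shutdown() below
 * both compose __e100_shutdown(), which quiesces the device and decides
 * the wake policy, with this helper. */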

#ifdef CONFIG_PM
static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	return __e100_power_off(pdev, wake);
}

static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	/* disable reverse auto-negotiation */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			E100_82552_SMARTSPEED,
			smartspeed & ~(E100_82552_REV_ANEG));
	}

	netif_device_attach(netdev);
	if (netif_running(netdev))
		e100_up(nic);

	return 0;
}
#endif /* CONFIG_PM */

static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}

/* ------------------ PCI Error Recovery infrastructure -------------- */
/**
 * e100_io_error_detected - called when PCI error is detected.
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e100_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* Only one device per card can do a reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
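
/* The e100_err_handler callbacks implement the standard PCI error
 * recovery sequence driven by the core:
 *
 *	error_detected()  ->  PCI_ERS_RESULT_NEED_RESET
 *	    (the core resets the slot)
 *	slot_reset()      ->  PCI_ERS_RESULT_RECOVERED
 *	resume()              reattaches and restarts the interface
 *
 * Only function 0 touches the hardware in slot_reset(); other functions
 * on a multi-function device report RECOVERED directly, since the
 * slot-level reset already covered them. */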

/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery
 * sequence has been completed.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, 0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}

static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static struct pci_driver e100_driver = {
	.name =		DRV_NAME,
	.id_table =	e100_id_table,
	.probe =	e100_probe,
	.remove =	__devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend =	e100_suspend,
	.resume =	e100_resume,
#endif
	.shutdown =	e100_shutdown,
	.err_handler = &e100_err_handler,
};

static int __init e100_init_module(void)
{
	if (((1 << debug) - 1) & NETIF_MSG_DRV) {
		pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
		pr_info("%s\n", DRV_COPYRIGHT);
	}
	return pci_register_driver(&e100_driver);
}

static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);