// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
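/* intentionally a no-op: installed as the adapter's alloc_rx_buf callback
 * where posting fresh receive buffers must be skipped
 */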
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int __maybe_unused e1000_suspend(struct device *dev);
static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s\n", e1000_driver_string);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

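	/* the legacy INTx line may be shared with other devices, hence
	 * IRQF_SHARED above
	 */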
	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];

		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others).
 * *** This routine MUST be followed by a call to e1000_reset. ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when the interface is
	 * down. The PHY cannot be powered down if any of the following is
	 * true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Since the watchdog task can reschedule other tasks, it must be
	 * cancelled first; otherwise a work item can still be running after
	 * the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result in transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition the PBA for MTUs greater than 9k. A CTRL.RST is
	 * required for the new partitioning to take effect.
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
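
		/* e.g. with a standard 1522 byte max frame and the 16 byte
		 * legacy descriptor: min_tx_space =
		 * ALIGN((1522 + 16 - 4) * 2, 1024) >> 10 = 4 (KB) and
		 * min_rx_space = ALIGN(1522, 1024) >> 10 = 2 (KB);
		 * illustrative arithmetic only
		 */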

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));
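	/* e.g. a 48 KB Rx allocation and a 1522 byte max frame give
	 * hwm = min(49152 * 9 / 10, 49152 - 1522) = 44236, hence
	 * fc_high_water = 44232 and fc_low_water = 44224 below;
	 * illustrative arithmetic only
	 */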

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);

		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

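	/* the EEPROM is valid when the 16-bit words before the checksum
	 * word at EEPROM_CHECKSUM_REG plus that word sum to EEPROM_SUM
	 * (0xBABA); recompute what the checksum word should have been
	 */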
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

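	/* e.g. a default 1500 byte MTU yields 1500 + 14 (header) + 4 (FCS)
	 * = 1518 bytes on the wire
	 */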
	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
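	/* (46 = ETH_ZLEN - ETH_HLEN = 60 - 14; 16110 assumes
	 * MAX_JUMBO_FRAME_SIZE is 0x3F00, i.e. 16128 - 18)
	 */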

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */
	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
					  EEPROM_INIT_CONTROL3_PORT_B, 1,
					  &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

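			/* a responding PHY returns an ID word that is
			 * neither all zeroes nor all ones
			 */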
			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function.
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
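		/* begin and (end - 1) fall in the same 64kB page iff their
		 * addresses agree above bit 15, i.e. the XOR shifted right
		 * by 16 is zero
		 */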
		return ((begin ^ (end - 1)) >> 16) == 0;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);
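	/* e.g. with a (assumed) default ring of 256 descriptors,
	 * 256 * 16 bytes = 4096, already 4K aligned
	 */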

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;

		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */
	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}

/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;

		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}

/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
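	/* BSEX scales the RCTL buffer size encoding by 16 (so the
	 * 256/512/1024 encodings select 4096/8192/16384 byte buffers);
	 * it is cleared again below for the standard 2048 byte buffer
	 */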
1809 switch (adapter->rx_buffer_len) {
1810 case E1000_RXBUFFER_2048:
1811 default:
1812 rctl |= E1000_RCTL_SZ_2048;
1813 rctl &= ~E1000_RCTL_BSEX;
1814 break;
1815 case E1000_RXBUFFER_4096:
1816 rctl |= E1000_RCTL_SZ_4096;
1817 break;
1818 case E1000_RXBUFFER_8192:
1819 rctl |= E1000_RCTL_SZ_8192;
1820 break;
1821 case E1000_RXBUFFER_16384:
1822 rctl |= E1000_RCTL_SZ_16384;
1823 break;
1824 }
1825
1826 /* This is useful for sniffing bad packets. */
1827 if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
1831 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1832 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1833 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1834
1835 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1836 E1000_RCTL_DPF | /* Allow filtered pause */
1837 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1838 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1839 * and that breaks VLANs.
1840 */
1841 }
1842
1843 ew32(RCTL, rctl);
1844}
1845
1846/**
1847 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848 * @adapter: board private structure
1849 *
1850 * Configure the Rx unit of the MAC after a reset.
1851 **/
1852static void e1000_configure_rx(struct e1000_adapter *adapter)
1853{
1854 u64 rdba;
1855 struct e1000_hw *hw = &adapter->hw;
1856 u32 rdlen, rctl, rxcsum;
1857
1858 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859 rdlen = adapter->rx_ring[0].count *
1860 sizeof(struct e1000_rx_desc);
1861 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863 } else {
1864 rdlen = adapter->rx_ring[0].count *
1865 sizeof(struct e1000_rx_desc);
1866 adapter->clean_rx = e1000_clean_rx_irq;
1867 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868 }
1869
1870 /* disable receives while setting up the descriptors */
1871 rctl = er32(RCTL);
1872 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873
1874 /* set the Receive Delay Timer Register */
1875 ew32(RDTR, adapter->rx_int_delay);
1876
1877 if (hw->mac_type >= e1000_82540) {
1878 ew32(RADV, adapter->rx_abs_int_delay);
1879 if (adapter->itr_setting != 0)
1880 ew32(ITR, 1000000000 / (adapter->itr * 256));
1881 }
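	/* The ITR register holds the minimum inter-interrupt interval in
	 * 256 ns units, so 10^9 / (itr * 256) converts an interrupts-per-
	 * second target into a register value; e.g. itr = 8000 ints/s
	 * programs roughly 1000000000 / (8000 * 256) = 488.
	 */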
1882
1883 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1884 * the Base and Length of the Rx Descriptor Ring
1885 */
1886 switch (adapter->num_rx_queues) {
1887 case 1:
1888 default:
1889 rdba = adapter->rx_ring[0].dma;
1890 ew32(RDLEN, rdlen);
1891 ew32(RDBAH, (rdba >> 32));
1892 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893 ew32(RDT, 0);
1894 ew32(RDH, 0);
1895 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896 E1000_RDH : E1000_82542_RDH);
1897 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898 E1000_RDT : E1000_82542_RDT);
1899 break;
1900 }
1901
1902 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1903 if (hw->mac_type >= e1000_82543) {
1904 rxcsum = er32(RXCSUM);
1905 if (adapter->rx_csum)
1906 rxcsum |= E1000_RXCSUM_TUOFL;
1907 else
1908 /* don't need to clear IPPCSE as it defaults to 0 */
1909 rxcsum &= ~E1000_RXCSUM_TUOFL;
1910 ew32(RXCSUM, rxcsum);
1911 }
1912
1913 /* Enable Receives */
1914 ew32(RCTL, rctl | E1000_RCTL_EN);
1915}
1916
1917/**
1918 * e1000_free_tx_resources - Free Tx Resources per Queue
1919 * @adapter: board private structure
1920 * @tx_ring: Tx descriptor ring for a specific queue
1921 *
1922 * Free all transmit software resources
1923 **/
1924static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925 struct e1000_tx_ring *tx_ring)
1926{
1927 struct pci_dev *pdev = adapter->pdev;
1928
1929 e1000_clean_tx_ring(adapter, tx_ring);
1930
1931 vfree(tx_ring->buffer_info);
1932 tx_ring->buffer_info = NULL;
1933
1934 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935 tx_ring->dma);
1936
1937 tx_ring->desc = NULL;
1938}
1939
1940/**
1941 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942 * @adapter: board private structure
1943 *
1944 * Free all transmit software resources
1945 **/
1946void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947{
1948 int i;
1949
1950 for (i = 0; i < adapter->num_tx_queues; i++)
1951 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952}
1953
1954static void
1955e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956 struct e1000_tx_buffer *buffer_info,
1957 int budget)
1958{
1959 if (buffer_info->dma) {
1960 if (buffer_info->mapped_as_page)
1961 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1962 buffer_info->length, DMA_TO_DEVICE);
1963 else
1964 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length,
1966 DMA_TO_DEVICE);
1967 buffer_info->dma = 0;
1968 }
1969 if (buffer_info->skb) {
1970 napi_consume_skb(buffer_info->skb, budget);
1971 buffer_info->skb = NULL;
1972 }
1973 buffer_info->time_stamp = 0;
1974 /* buffer_info must be completely set up in the transmit path */
1975}
1976
1977/**
1978 * e1000_clean_tx_ring - Free Tx Buffers
1979 * @adapter: board private structure
1980 * @tx_ring: ring to be cleaned
1981 **/
1982static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983 struct e1000_tx_ring *tx_ring)
1984{
1985 struct e1000_hw *hw = &adapter->hw;
1986 struct e1000_tx_buffer *buffer_info;
1987 unsigned long size;
1988 unsigned int i;
1989
1990 /* Free all the Tx ring sk_buffs */
1991
1992 for (i = 0; i < tx_ring->count; i++) {
1993 buffer_info = &tx_ring->buffer_info[i];
1994 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1995 }
1996
1997 netdev_reset_queue(adapter->netdev);
1998 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1999 memset(tx_ring->buffer_info, 0, size);
2000
2001 /* Zero out the descriptor ring */
2002
2003 memset(tx_ring->desc, 0, tx_ring->size);
2004
2005 tx_ring->next_to_use = 0;
2006 tx_ring->next_to_clean = 0;
2007 tx_ring->last_tx_tso = false;
2008
2009 writel(0, hw->hw_addr + tx_ring->tdh);
2010 writel(0, hw->hw_addr + tx_ring->tdt);
2011}
2012
2013/**
2014 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2015 * @adapter: board private structure
2016 **/
2017static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018{
2019 int i;
2020
2021 for (i = 0; i < adapter->num_tx_queues; i++)
2022 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023}
2024
2025/**
2026 * e1000_free_rx_resources - Free Rx Resources
2027 * @adapter: board private structure
2028 * @rx_ring: ring to clean the resources from
2029 *
2030 * Free all receive software resources
2031 **/
2032static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033 struct e1000_rx_ring *rx_ring)
2034{
2035 struct pci_dev *pdev = adapter->pdev;
2036
2037 e1000_clean_rx_ring(adapter, rx_ring);
2038
2039 vfree(rx_ring->buffer_info);
2040 rx_ring->buffer_info = NULL;
2041
2042 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043 rx_ring->dma);
2044
2045 rx_ring->desc = NULL;
2046}
2047
2048/**
2049 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2050 * @adapter: board private structure
2051 *
2052 * Free all receive software resources
2053 **/
2054void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2055{
2056 int i;
2057
2058 for (i = 0; i < adapter->num_rx_queues; i++)
2059 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060}
2061
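/* Each receive frag must hold the driver headroom plus one receive
 * buffer, and leave aligned tail room for the skb_shared_info that a
 * later build_skb() expects at the end; e.g. with rx_buffer_len = 2048
 * the length is SKB_DATA_ALIGN(2048 + E1000_HEADROOM) +
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 */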
2062#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2063static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2064{
2065 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2066 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2067}
2068
2069static void *e1000_alloc_frag(const struct e1000_adapter *a)
2070{
2071 unsigned int len = e1000_frag_len(a);
2072 u8 *data = netdev_alloc_frag(len);
2073
2074 if (likely(data))
2075 data += E1000_HEADROOM;
2076 return data;
2077}
2078
2079/**
2080 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2081 * @adapter: board private structure
2082 * @rx_ring: ring to free buffers from
2083 **/
2084static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2085 struct e1000_rx_ring *rx_ring)
2086{
2087 struct e1000_hw *hw = &adapter->hw;
2088 struct e1000_rx_buffer *buffer_info;
2089 struct pci_dev *pdev = adapter->pdev;
2090 unsigned long size;
2091 unsigned int i;
2092
2093 /* Free all the Rx netfrags */
2094 for (i = 0; i < rx_ring->count; i++) {
2095 buffer_info = &rx_ring->buffer_info[i];
2096 if (adapter->clean_rx == e1000_clean_rx_irq) {
2097 if (buffer_info->dma)
2098 dma_unmap_single(&pdev->dev, buffer_info->dma,
2099 adapter->rx_buffer_len,
2100 DMA_FROM_DEVICE);
2101 if (buffer_info->rxbuf.data) {
2102 skb_free_frag(buffer_info->rxbuf.data);
2103 buffer_info->rxbuf.data = NULL;
2104 }
2105 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2106 if (buffer_info->dma)
2107 dma_unmap_page(&pdev->dev, buffer_info->dma,
2108 adapter->rx_buffer_len,
2109 DMA_FROM_DEVICE);
2110 if (buffer_info->rxbuf.page) {
2111 put_page(buffer_info->rxbuf.page);
2112 buffer_info->rxbuf.page = NULL;
2113 }
2114 }
2115
2116 buffer_info->dma = 0;
2117 }
2118
2119 /* there also may be some cached data from a chained receive */
2120 napi_free_frags(&adapter->napi);
2121 rx_ring->rx_skb_top = NULL;
2122
2123 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2124 memset(rx_ring->buffer_info, 0, size);
2125
2126 /* Zero out the descriptor ring */
2127 memset(rx_ring->desc, 0, rx_ring->size);
2128
2129 rx_ring->next_to_clean = 0;
2130 rx_ring->next_to_use = 0;
2131
2132 writel(0, hw->hw_addr + rx_ring->rdh);
2133 writel(0, hw->hw_addr + rx_ring->rdt);
2134}
2135
2136/**
2137 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2138 * @adapter: board private structure
2139 **/
2140static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2141{
2142 int i;
2143
2144 for (i = 0; i < adapter->num_rx_queues; i++)
2145 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146}
2147
2148/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2149 * and memory write and invalidate disabled for certain operations
2150 */
2151static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2152{
2153 struct e1000_hw *hw = &adapter->hw;
2154 struct net_device *netdev = adapter->netdev;
2155 u32 rctl;
2156
2157 e1000_pci_clear_mwi(hw);
2158
2159 rctl = er32(RCTL);
2160 rctl |= E1000_RCTL_RST;
2161 ew32(RCTL, rctl);
2162 E1000_WRITE_FLUSH();
2163 mdelay(5);
2164
2165 if (netif_running(netdev))
2166 e1000_clean_all_rx_rings(adapter);
2167}
2168
2169static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2170{
2171 struct e1000_hw *hw = &adapter->hw;
2172 struct net_device *netdev = adapter->netdev;
2173 u32 rctl;
2174
2175 rctl = er32(RCTL);
2176 rctl &= ~E1000_RCTL_RST;
2177 ew32(RCTL, rctl);
2178 E1000_WRITE_FLUSH();
2179 mdelay(5);
2180
2181 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2182 e1000_pci_set_mwi(hw);
2183
2184 if (netif_running(netdev)) {
2185 /* No need to loop, because 82542 supports only 1 queue */
2186 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2187 e1000_configure_rx(adapter);
2188 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2189 }
2190}
2191
2192/**
2193 * e1000_set_mac - Change the Ethernet Address of the NIC
2194 * @netdev: network interface device structure
2195 * @p: pointer to an address structure
2196 *
2197 * Returns 0 on success, negative on failure
2198 **/
2199static int e1000_set_mac(struct net_device *netdev, void *p)
2200{
2201 struct e1000_adapter *adapter = netdev_priv(netdev);
2202 struct e1000_hw *hw = &adapter->hw;
2203 struct sockaddr *addr = p;
2204
2205 if (!is_valid_ether_addr(addr->sa_data))
2206 return -EADDRNOTAVAIL;
2207
2208 /* 82542 2.0 needs to be in reset to write receive address registers */
2209
2210 if (hw->mac_type == e1000_82542_rev2_0)
2211 e1000_enter_82542_rst(adapter);
2212
2213 eth_hw_addr_set(netdev, addr->sa_data);
2214 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2215
2216 e1000_rar_set(hw, hw->mac_addr, 0);
2217
2218 if (hw->mac_type == e1000_82542_rev2_0)
2219 e1000_leave_82542_rst(adapter);
2220
2221 return 0;
2222}
2223
2224/**
2225 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2226 * @netdev: network interface device structure
2227 *
2228 * The set_rx_mode entry point is called whenever the unicast or multicast
2229 * address lists or the network interface flags are updated. This routine is
2230 * responsible for configuring the hardware for proper unicast, multicast,
2231 * promiscuous mode, and all-multi behavior.
2232 **/
2233static void e1000_set_rx_mode(struct net_device *netdev)
2234{
2235 struct e1000_adapter *adapter = netdev_priv(netdev);
2236 struct e1000_hw *hw = &adapter->hw;
2237 struct netdev_hw_addr *ha;
2238 bool use_uc = false;
2239 u32 rctl;
2240 u32 hash_value;
2241 int i, rar_entries = E1000_RAR_ENTRIES;
2242 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2243 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2244
2245 if (!mcarray)
2246 return;
2247
2248 /* Check for Promiscuous and All Multicast modes */
2249
2250 rctl = er32(RCTL);
2251
2252 if (netdev->flags & IFF_PROMISC) {
2253 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2254 rctl &= ~E1000_RCTL_VFE;
2255 } else {
2256 if (netdev->flags & IFF_ALLMULTI)
2257 rctl |= E1000_RCTL_MPE;
2258 else
2259 rctl &= ~E1000_RCTL_MPE;
2260 /* Enable VLAN filter if there is a VLAN */
2261 if (e1000_vlan_used(adapter))
2262 rctl |= E1000_RCTL_VFE;
2263 }
2264
2265 if (netdev_uc_count(netdev) > rar_entries - 1) {
2266 rctl |= E1000_RCTL_UPE;
2267 } else if (!(netdev->flags & IFF_PROMISC)) {
2268 rctl &= ~E1000_RCTL_UPE;
2269 use_uc = true;
2270 }
2271
2272 ew32(RCTL, rctl);
2273
2274 /* 82542 2.0 needs to be in reset to write receive address registers */
2275
2276 if (hw->mac_type == e1000_82542_rev2_0)
2277 e1000_enter_82542_rst(adapter);
2278
	/* Load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address. If there are fewer
	 * than 14 additional addresses, the remaining filters are cleared
	 * below.
	 */
2286 i = 1;
2287 if (use_uc)
2288 netdev_for_each_uc_addr(ha, netdev) {
2289 if (i == rar_entries)
2290 break;
2291 e1000_rar_set(hw, ha->addr, i++);
2292 }
2293
2294 netdev_for_each_mc_addr(ha, netdev) {
2295 if (i == rar_entries) {
2296 /* load any remaining addresses into the hash table */
2297 u32 hash_reg, hash_bit, mta;
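			/* The multicast hash indexes a 4096-bit table:
			 * bits [11:5] select one of the 128 32-bit MTA
			 * registers and bits [4:0] select the bit within
			 * that register.
			 */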
2298 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2299 hash_reg = (hash_value >> 5) & 0x7F;
2300 hash_bit = hash_value & 0x1F;
2301 mta = (1 << hash_bit);
2302 mcarray[hash_reg] |= mta;
2303 } else {
2304 e1000_rar_set(hw, ha->addr, i++);
2305 }
2306 }
2307
2308 for (; i < rar_entries; i++) {
2309 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2310 E1000_WRITE_FLUSH();
2311 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2312 E1000_WRITE_FLUSH();
2313 }
2314
	/* Write the hash table completely, starting from the last register,
	 * to avoid trouble with write-combining chipsets and the cost of
	 * flushing each individual write.
	 */
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/* The 82544 has an errata where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range avoids the issue by always
		 * writing the odd offset first.
		 */
2324 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2325 }
2326 E1000_WRITE_FLUSH();
2327
2328 if (hw->mac_type == e1000_82542_rev2_0)
2329 e1000_leave_82542_rst(adapter);
2330
2331 kfree(mcarray);
2332}
2333
2334/**
2335 * e1000_update_phy_info_task - get phy info
2336 * @work: work struct contained inside adapter struct
2337 *
2338 * Need to wait a few seconds after link up to get diagnostic information from
2339 * the phy
2340 */
2341static void e1000_update_phy_info_task(struct work_struct *work)
2342{
2343 struct e1000_adapter *adapter = container_of(work,
2344 struct e1000_adapter,
2345 phy_info_task.work);
2346
2347 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2348}
2349
2350/**
 * e1000_82547_tx_fifo_stall_task - detect and recover a stalled Tx FIFO
2352 * @work: work struct contained inside adapter struct
2353 **/
2354static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2355{
2356 struct e1000_adapter *adapter = container_of(work,
2357 struct e1000_adapter,
2358 fifo_stall_task.work);
2359 struct e1000_hw *hw = &adapter->hw;
2360 struct net_device *netdev = adapter->netdev;
2361 u32 tctl;
2362
2363 if (atomic_read(&adapter->tx_fifo_stall)) {
2364 if ((er32(TDT) == er32(TDH)) &&
2365 (er32(TDFT) == er32(TDFH)) &&
2366 (er32(TDFTS) == er32(TDFHS))) {
2367 tctl = er32(TCTL);
2368 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2369 ew32(TDFT, adapter->tx_head_addr);
2370 ew32(TDFH, adapter->tx_head_addr);
2371 ew32(TDFTS, adapter->tx_head_addr);
2372 ew32(TDFHS, adapter->tx_head_addr);
2373 ew32(TCTL, tctl);
2374 E1000_WRITE_FLUSH();
2375
2376 adapter->tx_fifo_head = 0;
2377 atomic_set(&adapter->tx_fifo_stall, 0);
2378 netif_wake_queue(netdev);
2379 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2380 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2381 }
2382 }
2383}
2384
2385bool e1000_has_link(struct e1000_adapter *adapter)
2386{
2387 struct e1000_hw *hw = &adapter->hw;
2388 bool link_active = false;
2389
	/* get_link_status is set on an LSC (link status change) interrupt
	 * or an rx sequence error interrupt (except on the Intel ce4100).
	 * It remains set until e1000_check_for_link() establishes link;
	 * this applies to copper adapters ONLY.
	 */
2396 switch (hw->media_type) {
2397 case e1000_media_type_copper:
2398 if (hw->mac_type == e1000_ce4100)
2399 hw->get_link_status = 1;
2400 if (hw->get_link_status) {
2401 e1000_check_for_link(hw);
2402 link_active = !hw->get_link_status;
2403 } else {
2404 link_active = true;
2405 }
2406 break;
2407 case e1000_media_type_fiber:
2408 e1000_check_for_link(hw);
2409 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410 break;
2411 case e1000_media_type_internal_serdes:
2412 e1000_check_for_link(hw);
2413 link_active = hw->serdes_has_link;
2414 break;
2415 default:
2416 break;
2417 }
2418
2419 return link_active;
2420}
2421
2422/**
2423 * e1000_watchdog - work function
2424 * @work: work struct contained inside adapter struct
2425 **/
2426static void e1000_watchdog(struct work_struct *work)
2427{
2428 struct e1000_adapter *adapter = container_of(work,
2429 struct e1000_adapter,
2430 watchdog_task.work);
2431 struct e1000_hw *hw = &adapter->hw;
2432 struct net_device *netdev = adapter->netdev;
2433 struct e1000_tx_ring *txdr = adapter->tx_ring;
2434 u32 link, tctl;
2435
2436 link = e1000_has_link(adapter);
	if (netif_carrier_ok(netdev) && link)
2438 goto link_up;
2439
2440 if (link) {
2441 if (!netif_carrier_ok(netdev)) {
2442 u32 ctrl;
2443 /* update snapshot of PHY registers on LSC */
2444 e1000_get_speed_and_duplex(hw,
2445 &adapter->link_speed,
2446 &adapter->link_duplex);
2447
2448 ctrl = er32(CTRL);
2449 pr_info("%s NIC Link is Up %d Mbps %s, "
2450 "Flow Control: %s\n",
2451 netdev->name,
2452 adapter->link_speed,
2453 adapter->link_duplex == FULL_DUPLEX ?
2454 "Full Duplex" : "Half Duplex",
2455 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2456 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2457 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2458 E1000_CTRL_TFCE) ? "TX" : "None")));
2459
2460 /* adjust timeout factor according to speed/duplex */
2461 adapter->tx_timeout_factor = 1;
2462 switch (adapter->link_speed) {
2463 case SPEED_10:
2464 adapter->tx_timeout_factor = 16;
2465 break;
2466 case SPEED_100:
				/* maybe add some timeout factor? */
2468 break;
2469 }
2470
2471 /* enable transmits in the hardware */
2472 tctl = er32(TCTL);
2473 tctl |= E1000_TCTL_EN;
2474 ew32(TCTL, tctl);
2475
2476 netif_carrier_on(netdev);
2477 if (!test_bit(__E1000_DOWN, &adapter->flags))
2478 schedule_delayed_work(&adapter->phy_info_task,
2479 2 * HZ);
2480 adapter->smartspeed = 0;
2481 }
2482 } else {
2483 if (netif_carrier_ok(netdev)) {
2484 adapter->link_speed = 0;
2485 adapter->link_duplex = 0;
2486 pr_info("%s NIC Link is Down\n",
2487 netdev->name);
2488 netif_carrier_off(netdev);
2489
2490 if (!test_bit(__E1000_DOWN, &adapter->flags))
2491 schedule_delayed_work(&adapter->phy_info_task,
2492 2 * HZ);
2493 }
2494
2495 e1000_smartspeed(adapter);
2496 }
2497
2498link_up:
2499 e1000_update_stats(adapter);
2500
2501 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2502 adapter->tpt_old = adapter->stats.tpt;
2503 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2504 adapter->colc_old = adapter->stats.colc;
2505
2506 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2507 adapter->gorcl_old = adapter->stats.gorcl;
2508 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2509 adapter->gotcl_old = adapter->stats.gotcl;
2510
2511 e1000_update_adaptive(hw);
2512
2513 if (!netif_carrier_ok(netdev)) {
2514 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2515 /* We've lost link, so the controller stops DMA,
2516 * but we've got queued Tx work that's never going
2517 * to get done, so reset controller to flush Tx.
2518 * (Do the reset outside of interrupt context).
2519 */
2520 adapter->tx_timeout_count++;
2521 schedule_work(&adapter->reset_task);
2522 /* exit immediately since reset is imminent */
2523 return;
2524 }
2525 }
2526
2527 /* Simple mode for Interrupt Throttle Rate (ITR) */
2528 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2529 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2530 * Total asymmetrical Tx or Rx gets ITR=8000;
2531 * everyone else is between 2000-8000.
2532 */
2533 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2534 u32 dif = (adapter->gotcl > adapter->gorcl ?
2535 adapter->gotcl - adapter->gorcl :
2536 adapter->gorcl - adapter->gotcl) / 10000;
2537 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2538
2539 ew32(ITR, 1000000000 / (itr * 256));
2540 }
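	/* Shape of the interpolation above: perfectly symmetric traffic
	 * gives dif = 0 and hence itr = 2000; fully one-sided traffic
	 * gives dif ~= goc and hence itr ~= 8000; mixed loads land in
	 * between.
	 */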
2541
2542 /* Cause software interrupt to ensure rx ring is cleaned */
2543 ew32(ICS, E1000_ICS_RXDMT0);
2544
2545 /* Force detection of hung controller every watchdog period */
2546 adapter->detect_tx_hung = true;
2547
2548 /* Reschedule the task */
2549 if (!test_bit(__E1000_DOWN, &adapter->flags))
2550 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2551}
2552
2553enum latency_range {
2554 lowest_latency = 0,
2555 low_latency = 1,
2556 bulk_latency = 2,
2557 latency_invalid = 255
2558};
2559
2560/**
2561 * e1000_update_itr - update the dynamic ITR value based on statistics
2562 * @adapter: pointer to adapter
2563 * @itr_setting: current adapter->itr
2564 * @packets: the number of packets during this measurement interval
2565 * @bytes: the number of bytes during this measurement interval
2566 *
2567 * Stores a new ITR value based on packets and byte
2568 * counts during the last interrupt. The advantage of per interrupt
2569 * computation is faster updates and more accurate ITR for the current
2570 * traffic pattern. Constants in this function were computed
2571 * based on theoretical maximum wire speed and thresholds were set based
2572 * on testing data as well as attempting to minimize response time
2573 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c).
2576 **/
2577static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2578 u16 itr_setting, int packets, int bytes)
2579{
2580 unsigned int retval = itr_setting;
2581 struct e1000_hw *hw = &adapter->hw;
2582
2583 if (unlikely(hw->mac_type < e1000_82540))
2584 goto update_itr_done;
2585
2586 if (packets == 0)
2587 goto update_itr_done;
2588
2589 switch (itr_setting) {
2590 case lowest_latency:
		/* jumbo frames get bulk treatment */
2592 if (bytes/packets > 8000)
2593 retval = bulk_latency;
2594 else if ((packets < 5) && (bytes > 512))
2595 retval = low_latency;
2596 break;
2597 case low_latency: /* 50 usec aka 20000 ints/s */
2598 if (bytes > 10000) {
2599 /* jumbo frames need bulk latency setting */
2600 if (bytes/packets > 8000)
2601 retval = bulk_latency;
2602 else if ((packets < 10) || ((bytes/packets) > 1200))
2603 retval = bulk_latency;
			else if (packets > 35)
2605 retval = lowest_latency;
2606 } else if (bytes/packets > 2000)
2607 retval = bulk_latency;
2608 else if (packets <= 2 && bytes < 512)
2609 retval = lowest_latency;
2610 break;
2611 case bulk_latency: /* 250 usec aka 4000 ints/s */
2612 if (bytes > 25000) {
2613 if (packets > 35)
2614 retval = low_latency;
2615 } else if (bytes < 6000) {
2616 retval = low_latency;
2617 }
2618 break;
2619 }
2620
2621update_itr_done:
2622 return retval;
2623}
2624
2625static void e1000_set_itr(struct e1000_adapter *adapter)
2626{
2627 struct e1000_hw *hw = &adapter->hw;
2628 u16 current_itr;
2629 u32 new_itr = adapter->itr;
2630
2631 if (unlikely(hw->mac_type < e1000_82540))
2632 return;
2633
2634 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2635 if (unlikely(adapter->link_speed != SPEED_1000)) {
2636 new_itr = 4000;
2637 goto set_itr_now;
2638 }
2639
2640 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641 adapter->total_tx_packets,
2642 adapter->total_tx_bytes);
2643 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2644 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645 adapter->tx_itr = low_latency;
2646
2647 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648 adapter->total_rx_packets,
2649 adapter->total_rx_bytes);
2650 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2651 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652 adapter->rx_itr = low_latency;
2653
2654 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655
2656 switch (current_itr) {
2657 /* counts and packets in update_itr are dependent on these numbers */
2658 case lowest_latency:
2659 new_itr = 70000;
2660 break;
2661 case low_latency:
2662 new_itr = 20000; /* aka hwitr = ~200 */
2663 break;
2664 case bulk_latency:
2665 new_itr = 4000;
2666 break;
2667 default:
2668 break;
2669 }
2670
2671set_itr_now:
2672 if (new_itr != adapter->itr) {
2673 /* this attempts to bias the interrupt rate towards Bulk
2674 * by adding intermediate steps when interrupt rate is
2675 * increasing
2676 */
2677 new_itr = new_itr > adapter->itr ?
2678 min(adapter->itr + (new_itr >> 2), new_itr) :
2679 new_itr;
2680 adapter->itr = new_itr;
2681 ew32(ITR, 1000000000 / (new_itr * 256));
2682 }
2683}
2684
2685#define E1000_TX_FLAGS_CSUM 0x00000001
2686#define E1000_TX_FLAGS_VLAN 0x00000002
2687#define E1000_TX_FLAGS_TSO 0x00000004
2688#define E1000_TX_FLAGS_IPV4 0x00000008
2689#define E1000_TX_FLAGS_NO_FCS 0x00000010
2690#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2691#define E1000_TX_FLAGS_VLAN_SHIFT 16
2692
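/* Note on e1000_tso()'s return convention: it returns a negative errno
 * when linearizing the header area fails, 1 (true) when a TSO context
 * descriptor was queued, and 0 (false) for non-GSO skbs, so callers
 * must check for a negative value before treating the result as a bool.
 */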
2693static int e1000_tso(struct e1000_adapter *adapter,
2694 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2695 __be16 protocol)
2696{
2697 struct e1000_context_desc *context_desc;
2698 struct e1000_tx_buffer *buffer_info;
2699 unsigned int i;
2700 u32 cmd_length = 0;
2701 u16 ipcse = 0, tucse, mss;
2702 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2703
2704 if (skb_is_gso(skb)) {
2705 int err;
2706
2707 err = skb_cow_head(skb, 0);
2708 if (err < 0)
2709 return err;
2710
2711 hdr_len = skb_tcp_all_headers(skb);
2712 mss = skb_shinfo(skb)->gso_size;
2713 if (protocol == htons(ETH_P_IP)) {
2714 struct iphdr *iph = ip_hdr(skb);
2715 iph->tot_len = 0;
2716 iph->check = 0;
2717 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2718 iph->daddr, 0,
2719 IPPROTO_TCP,
2720 0);
2721 cmd_length = E1000_TXD_CMD_IP;
2722 ipcse = skb_transport_offset(skb) - 1;
2723 } else if (skb_is_gso_v6(skb)) {
2724 tcp_v6_gso_csum_prep(skb);
2725 ipcse = 0;
2726 }
2727 ipcss = skb_network_offset(skb);
2728 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2729 tucss = skb_transport_offset(skb);
2730 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2731 tucse = 0;
2732
2733 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2734 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2735
2736 i = tx_ring->next_to_use;
2737 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2738 buffer_info = &tx_ring->buffer_info[i];
2739
2740 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2741 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2742 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2743 context_desc->upper_setup.tcp_fields.tucss = tucss;
2744 context_desc->upper_setup.tcp_fields.tucso = tucso;
2745 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2746 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2747 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2748 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2749
2750 buffer_info->time_stamp = jiffies;
2751 buffer_info->next_to_watch = i;
2752
2753 if (++i == tx_ring->count)
2754 i = 0;
2755
2756 tx_ring->next_to_use = i;
2757
2758 return true;
2759 }
2760 return false;
2761}
2762
2763static bool e1000_tx_csum(struct e1000_adapter *adapter,
2764 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2765 __be16 protocol)
2766{
2767 struct e1000_context_desc *context_desc;
2768 struct e1000_tx_buffer *buffer_info;
2769 unsigned int i;
2770 u8 css;
2771 u32 cmd_len = E1000_TXD_CMD_DEXT;
2772
2773 if (skb->ip_summed != CHECKSUM_PARTIAL)
2774 return false;
2775
2776 switch (protocol) {
2777 case cpu_to_be16(ETH_P_IP):
2778 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2779 cmd_len |= E1000_TXD_CMD_TCP;
2780 break;
2781 case cpu_to_be16(ETH_P_IPV6):
2782 /* XXX not handling all IPV6 headers */
2783 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2784 cmd_len |= E1000_TXD_CMD_TCP;
2785 break;
2786 default:
2787 if (unlikely(net_ratelimit()))
2788 e_warn(drv, "checksum_partial proto=%x!\n",
2789 skb->protocol);
2790 break;
2791 }
2792
2793 css = skb_checksum_start_offset(skb);
2794
2795 i = tx_ring->next_to_use;
2796 buffer_info = &tx_ring->buffer_info[i];
2797 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2798
2799 context_desc->lower_setup.ip_config = 0;
2800 context_desc->upper_setup.tcp_fields.tucss = css;
2801 context_desc->upper_setup.tcp_fields.tucso =
2802 css + skb->csum_offset;
2803 context_desc->upper_setup.tcp_fields.tucse = 0;
2804 context_desc->tcp_seg_setup.data = 0;
2805 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2806
2807 buffer_info->time_stamp = jiffies;
2808 buffer_info->next_to_watch = i;
2809
2810 if (unlikely(++i == tx_ring->count))
2811 i = 0;
2812
2813 tx_ring->next_to_use = i;
2814
2815 return true;
2816}
2817
2818#define E1000_MAX_TXD_PWR 12
2819#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
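/* i.e. each data descriptor carries at most 1 << 12 = 4096 bytes */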
2820
2821static int e1000_tx_map(struct e1000_adapter *adapter,
2822 struct e1000_tx_ring *tx_ring,
2823 struct sk_buff *skb, unsigned int first,
2824 unsigned int max_per_txd, unsigned int nr_frags,
2825 unsigned int mss)
2826{
2827 struct e1000_hw *hw = &adapter->hw;
2828 struct pci_dev *pdev = adapter->pdev;
2829 struct e1000_tx_buffer *buffer_info;
2830 unsigned int len = skb_headlen(skb);
2831 unsigned int offset = 0, size, count = 0, i;
2832 unsigned int f, bytecount, segs;
2833
2834 i = tx_ring->next_to_use;
2835
2836 while (len) {
2837 buffer_info = &tx_ring->buffer_info[i];
2838 size = min(len, max_per_txd);
2839 /* Workaround for Controller erratum --
2840 * descriptor for non-tso packet in a linear SKB that follows a
2841 * tso gets written back prematurely before the data is fully
2842 * DMA'd to the controller
2843 */
2844 if (!skb->data_len && tx_ring->last_tx_tso &&
2845 !skb_is_gso(skb)) {
2846 tx_ring->last_tx_tso = false;
2847 size -= 4;
2848 }
2849
2850 /* Workaround for premature desc write-backs
2851 * in TSO mode. Append 4-byte sentinel desc
2852 */
2853 if (unlikely(mss && !nr_frags && size == len && size > 8))
2854 size -= 4;
		/* Work-around for errata 10; it applies to all controllers
		 * in PCI-X mode. The fix is to make sure that the first
		 * descriptor of a packet is smaller than
		 * 2048 - 16 - 16 (or 2016) bytes.
		 */
2860 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861 (size > 2015) && count == 0))
2862 size = 2015;
2863
2864 /* Workaround for potential 82544 hang in PCI-X. Avoid
2865 * terminating buffers within evenly-aligned dwords.
2866 */
2867 if (unlikely(adapter->pcix_82544 &&
2868 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869 size > 4))
2870 size -= 4;
2871
2872 buffer_info->length = size;
2873 /* set time_stamp *before* dma to help avoid a possible race */
2874 buffer_info->time_stamp = jiffies;
2875 buffer_info->mapped_as_page = false;
2876 buffer_info->dma = dma_map_single(&pdev->dev,
2877 skb->data + offset,
2878 size, DMA_TO_DEVICE);
2879 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880 goto dma_error;
2881 buffer_info->next_to_watch = i;
2882
2883 len -= size;
2884 offset += size;
2885 count++;
2886 if (len) {
2887 i++;
2888 if (unlikely(i == tx_ring->count))
2889 i = 0;
2890 }
2891 }
2892
2893 for (f = 0; f < nr_frags; f++) {
2894 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895
2896 len = skb_frag_size(frag);
2897 offset = 0;
2898
2899 while (len) {
2900 unsigned long bufend;
2901 i++;
2902 if (unlikely(i == tx_ring->count))
2903 i = 0;
2904
2905 buffer_info = &tx_ring->buffer_info[i];
2906 size = min(len, max_per_txd);
2907 /* Workaround for premature desc write-backs
2908 * in TSO mode. Append 4-byte sentinel desc
2909 */
2910 if (unlikely(mss && f == (nr_frags-1) &&
2911 size == len && size > 8))
2912 size -= 4;
2913 /* Workaround for potential 82544 hang in PCI-X.
2914 * Avoid terminating buffers within evenly-aligned
2915 * dwords.
2916 */
2917 bufend = (unsigned long)
2918 page_to_phys(skb_frag_page(frag));
2919 bufend += offset + size - 1;
2920 if (unlikely(adapter->pcix_82544 &&
2921 !(bufend & 4) &&
2922 size > 4))
2923 size -= 4;
2924
2925 buffer_info->length = size;
2926 buffer_info->time_stamp = jiffies;
2927 buffer_info->mapped_as_page = true;
2928 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929 offset, size, DMA_TO_DEVICE);
2930 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931 goto dma_error;
2932 buffer_info->next_to_watch = i;
2933
2934 len -= size;
2935 offset += size;
2936 count++;
2937 }
2938 }
2939
2940 segs = skb_shinfo(skb)->gso_segs ?: 1;
2941 /* multiply data chunks by size of headers */
2942 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
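	/* For a typical TSO skb the linear area holds just the headers, and
	 * each of the segs segments carries its own copy of them on the
	 * wire, so charge skb->len plus one extra header length per
	 * additional segment.
	 */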
2943
2944 tx_ring->buffer_info[i].skb = skb;
2945 tx_ring->buffer_info[i].segs = segs;
2946 tx_ring->buffer_info[i].bytecount = bytecount;
2947 tx_ring->buffer_info[first].next_to_watch = i;
2948
2949 return count;
2950
2951dma_error:
2952 dev_err(&pdev->dev, "TX DMA map failed\n");
2953 buffer_info->dma = 0;
2954 if (count)
2955 count--;
2956
2957 while (count--) {
2958 if (i == 0)
2959 i += tx_ring->count;
2960 i--;
2961 buffer_info = &tx_ring->buffer_info[i];
2962 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963 }
2964
2965 return 0;
2966}
2967
2968static void e1000_tx_queue(struct e1000_adapter *adapter,
2969 struct e1000_tx_ring *tx_ring, int tx_flags,
2970 int count)
2971{
2972 struct e1000_tx_desc *tx_desc = NULL;
2973 struct e1000_tx_buffer *buffer_info;
2974 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975 unsigned int i;
2976
2977 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979 E1000_TXD_CMD_TSE;
2980 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981
2982 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984 }
2985
2986 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989 }
2990
2991 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992 txd_lower |= E1000_TXD_CMD_VLE;
2993 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994 }
2995
2996 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2997 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2998
2999 i = tx_ring->next_to_use;
3000
3001 while (count--) {
3002 buffer_info = &tx_ring->buffer_info[i];
3003 tx_desc = E1000_TX_DESC(*tx_ring, i);
3004 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005 tx_desc->lower.data =
3006 cpu_to_le32(txd_lower | buffer_info->length);
3007 tx_desc->upper.data = cpu_to_le32(txd_upper);
3008 if (unlikely(++i == tx_ring->count))
3009 i = 0;
3010 }
3011
3012 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013
3014 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3015 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3016 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3017
3018 /* Force memory writes to complete before letting h/w
3019 * know there are new descriptors to fetch. (Only
3020 * applicable for weak-ordered memory model archs,
3021 * such as IA-64).
3022 */
3023 dma_wmb();
3024
3025 tx_ring->next_to_use = i;
3026}
3027
3028/* 82547 workaround to avoid controller hang in half-duplex environment.
3029 * The workaround is to avoid queuing a large packet that would span
3030 * the internal Tx FIFO ring boundary by notifying the stack to resend
3031 * the packet at a later time. This gives the Tx FIFO an opportunity to
3032 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3033 * to the beginning of the Tx FIFO.
3034 */
3035
3036#define E1000_FIFO_HDR 0x10
3037#define E1000_82547_PAD_LEN 0x3E0
3038
3039static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040 struct sk_buff *skb)
3041{
3042 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044
3045 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046
3047 if (adapter->link_duplex != HALF_DUPLEX)
3048 goto no_fifo_stall_required;
3049
3050 if (atomic_read(&adapter->tx_fifo_stall))
3051 return 1;
3052
3053 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054 atomic_set(&adapter->tx_fifo_stall, 1);
3055 return 1;
3056 }
3057
3058no_fifo_stall_required:
3059 adapter->tx_fifo_head += skb_fifo_len;
3060 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062 return 0;
3063}
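/* Worked example for the check above: a 1514-byte frame becomes
 * 1514 + 0x10 = 1530 bytes, rounded up to 1536 by the 16-byte FIFO
 * header granularity; a stall is flagged once
 * 1536 >= E1000_82547_PAD_LEN + remaining FIFO space.
 */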
3064
3065static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066{
3067 struct e1000_adapter *adapter = netdev_priv(netdev);
3068 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069
3070 netif_stop_queue(netdev);
3071 /* Herbert's original patch had:
3072 * smp_mb__after_netif_stop_queue();
3073 * but since that doesn't exist yet, just open code it.
3074 */
3075 smp_mb();
3076
3077 /* We need to check again in a case another CPU has just
3078 * made room available.
3079 */
3080 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081 return -EBUSY;
3082
3083 /* A reprieve! */
3084 netif_start_queue(netdev);
3085 ++adapter->restart_queue;
3086 return 0;
3087}
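/* The stop-queue/recheck pattern above closes a race with the Tx clean
 * path: the smp_mb() publishes the stopped state before the free count
 * is re-read, so either this CPU sees the space the cleaner just freed,
 * or the cleaner sees the stopped queue and wakes it.
 */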
3088
3089static int e1000_maybe_stop_tx(struct net_device *netdev,
3090 struct e1000_tx_ring *tx_ring, int size)
3091{
3092 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3093 return 0;
3094 return __e1000_maybe_stop_tx(netdev, size);
3095}
3096
3097#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
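/* TXD_USE_COUNT(S, X) is ceil(S / 2^X); e.g. a 9000-byte buffer with
 * max_txd_pwr = 12 needs (9000 + 4095) >> 12 = 3 descriptors.
 */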
3098static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3099 struct net_device *netdev)
3100{
3101 struct e1000_adapter *adapter = netdev_priv(netdev);
3102 struct e1000_hw *hw = &adapter->hw;
3103 struct e1000_tx_ring *tx_ring;
3104 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3105 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3106 unsigned int tx_flags = 0;
3107 unsigned int len = skb_headlen(skb);
3108 unsigned int nr_frags;
3109 unsigned int mss;
3110 int count = 0;
3111 int tso;
3112 unsigned int f;
3113 __be16 protocol = vlan_get_protocol(skb);
3114
3115 /* This goes back to the question of how to logically map a Tx queue
3116 * to a flow. Right now, performance is impacted slightly negatively
3117 * if using multiple Tx queues. If the stack breaks away from a
3118 * single qdisc implementation, we can look at this again.
3119 */
3120 tx_ring = adapter->tx_ring;
3121
	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To work around this, pad all small packets manually.
	 */
3126 if (eth_skb_pad(skb))
3127 return NETDEV_TX_OK;
3128
3129 mss = skb_shinfo(skb)->gso_size;
3130 /* The controller does a simple calculation to
3131 * make sure there is enough room in the FIFO before
3132 * initiating the DMA for each buffer. The calc is:
3133 * 4 = ceil(buffer len/mss). To make sure we don't
3134 * overrun the FIFO, adjust the max buffer len if mss
3135 * drops.
3136 */
3137 if (mss) {
3138 u8 hdr_len;
3139 max_per_txd = min(mss << 2, max_per_txd);
3140 max_txd_pwr = fls(max_per_txd) - 1;
3141
3142 hdr_len = skb_tcp_all_headers(skb);
3143 if (skb->data_len && hdr_len == len) {
3144 switch (hw->mac_type) {
3145 case e1000_82544: {
3146 unsigned int pull_size;
3147
				/* Make sure we have room to chop off 4
				 * bytes, and that the end alignment will
				 * work out to this hardware's requirements.
				 * NOTE: this is a TSO-only workaround; if
				 * the end byte alignment is not correct it
				 * moves us into the next dword.
				 */
3155 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3156 & 4)
3157 break;
3158 pull_size = min((unsigned int)4, skb->data_len);
3159 if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail failed.\n");
3162 dev_kfree_skb_any(skb);
3163 return NETDEV_TX_OK;
3164 }
3165 len = skb_headlen(skb);
3166 break;
3167 }
3168 default:
3169 /* do nothing */
3170 break;
3171 }
3172 }
3173 }
3174
3175 /* reserve a descriptor for the offload context */
3176 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177 count++;
3178 count++;
3179
3180 /* Controller Erratum workaround */
3181 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182 count++;
3183
3184 count += TXD_USE_COUNT(len, max_txd_pwr);
3185
3186 if (adapter->pcix_82544)
3187 count++;
3188
	/* Work-around for errata 10: it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count.
	 */
3192 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193 (len > 2015)))
3194 count++;
3195
3196 nr_frags = skb_shinfo(skb)->nr_frags;
3197 for (f = 0; f < nr_frags; f++)
3198 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199 max_txd_pwr);
3200 if (adapter->pcix_82544)
3201 count += nr_frags;
3202
3203 /* need: count + 2 desc gap to keep tail from touching
3204 * head, otherwise try next time
3205 */
3206 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3207 return NETDEV_TX_BUSY;
3208
3209 if (unlikely((hw->mac_type == e1000_82547) &&
3210 (e1000_82547_fifo_workaround(adapter, skb)))) {
3211 netif_stop_queue(netdev);
3212 if (!test_bit(__E1000_DOWN, &adapter->flags))
3213 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214 return NETDEV_TX_BUSY;
3215 }
3216
3217 if (skb_vlan_tag_present(skb)) {
3218 tx_flags |= E1000_TX_FLAGS_VLAN;
3219 tx_flags |= (skb_vlan_tag_get(skb) <<
3220 E1000_TX_FLAGS_VLAN_SHIFT);
3221 }
3222
3223 first = tx_ring->next_to_use;
3224
3225 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3226 if (tso < 0) {
3227 dev_kfree_skb_any(skb);
3228 return NETDEV_TX_OK;
3229 }
3230
3231 if (likely(tso)) {
3232 if (likely(hw->mac_type != e1000_82544))
3233 tx_ring->last_tx_tso = true;
3234 tx_flags |= E1000_TX_FLAGS_TSO;
3235 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3236 tx_flags |= E1000_TX_FLAGS_CSUM;
3237
3238 if (protocol == htons(ETH_P_IP))
3239 tx_flags |= E1000_TX_FLAGS_IPV4;
3240
3241 if (unlikely(skb->no_fcs))
3242 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3243
3244 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3245 nr_frags, mss);
3246
3247 if (count) {
		/* The number of descriptors needed is higher than in other
		 * Intel drivers due to a number of workarounds. The
		 * breakdown is below:
3250 * Data descriptors: MAX_SKB_FRAGS + 1
3251 * Context Descriptor: 1
3252 * Keep head from touching tail: 2
3253 * Workarounds: 3
3254 */
3255 int desc_needed = MAX_SKB_FRAGS + 7;
3256
3257 netdev_sent_queue(netdev, skb->len);
3258 skb_tx_timestamp(skb);
3259
3260 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3261
3262 /* 82544 potentially requires twice as many data descriptors
3263 * in order to guarantee buffers don't end on evenly-aligned
3264 * dwords
3265 */
3266 if (adapter->pcix_82544)
3267 desc_needed += MAX_SKB_FRAGS + 1;
3268
3269 /* Make sure there is space in the ring for the next send. */
3270 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271
3272 if (!netdev_xmit_more() ||
3273 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275 }
3276 } else {
3277 dev_kfree_skb_any(skb);
3278 tx_ring->buffer_info[first].time_stamp = 0;
3279 tx_ring->next_to_use = first;
3280 }
3281
3282 return NETDEV_TX_OK;
3283}
3284
3285#define NUM_REGS 38 /* 1 based count */
3286static void e1000_regdump(struct e1000_adapter *adapter)
3287{
3288 struct e1000_hw *hw = &adapter->hw;
3289 u32 regs[NUM_REGS];
3290 u32 *regs_buff = regs;
3291 int i = 0;
3292
3293 static const char * const reg_name[] = {
3294 "CTRL", "STATUS",
3295 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297 "TIDV", "TXDCTL", "TADV", "TARC0",
3298 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299 "TXDCTL1", "TARC1",
3300 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303 };
3304
3305 regs_buff[0] = er32(CTRL);
3306 regs_buff[1] = er32(STATUS);
3307
3308 regs_buff[2] = er32(RCTL);
3309 regs_buff[3] = er32(RDLEN);
3310 regs_buff[4] = er32(RDH);
3311 regs_buff[5] = er32(RDT);
3312 regs_buff[6] = er32(RDTR);
3313
3314 regs_buff[7] = er32(TCTL);
3315 regs_buff[8] = er32(TDBAL);
3316 regs_buff[9] = er32(TDBAH);
3317 regs_buff[10] = er32(TDLEN);
3318 regs_buff[11] = er32(TDH);
3319 regs_buff[12] = er32(TDT);
3320 regs_buff[13] = er32(TIDV);
3321 regs_buff[14] = er32(TXDCTL);
3322 regs_buff[15] = er32(TADV);
3323 regs_buff[16] = er32(TARC0);
3324
3325 regs_buff[17] = er32(TDBAL1);
3326 regs_buff[18] = er32(TDBAH1);
3327 regs_buff[19] = er32(TDLEN1);
3328 regs_buff[20] = er32(TDH1);
3329 regs_buff[21] = er32(TDT1);
3330 regs_buff[22] = er32(TXDCTL1);
3331 regs_buff[23] = er32(TARC1);
3332 regs_buff[24] = er32(CTRL_EXT);
3333 regs_buff[25] = er32(ERT);
3334 regs_buff[26] = er32(RDBAL0);
3335 regs_buff[27] = er32(RDBAH0);
3336 regs_buff[28] = er32(TDFH);
3337 regs_buff[29] = er32(TDFT);
3338 regs_buff[30] = er32(TDFHS);
3339 regs_buff[31] = er32(TDFTS);
3340 regs_buff[32] = er32(TDFPC);
3341 regs_buff[33] = er32(RDFH);
3342 regs_buff[34] = er32(RDFT);
3343 regs_buff[35] = er32(RDFHS);
3344 regs_buff[36] = er32(RDFTS);
3345 regs_buff[37] = er32(RDFPC);
3346
3347 pr_info("Register dump\n");
3348 for (i = 0; i < NUM_REGS; i++)
3349 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3350}
3351
3352/*
3353 * e1000_dump: Print registers, tx ring and rx ring
3354 */
3355static void e1000_dump(struct e1000_adapter *adapter)
3356{
3357 /* this code doesn't handle multiple rings */
3358 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3359 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3360 int i;
3361
3362 if (!netif_msg_hw(adapter))
3363 return;
3364
3365 /* Print Registers */
3366 e1000_regdump(adapter);
3367
3368 /* transmit dump */
3369 pr_info("TX Desc ring0 dump\n");
3370
3371 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372 *
3373 * Legacy Transmit Descriptor
3374 * +--------------------------------------------------------------+
3375 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3376 * +--------------------------------------------------------------+
3377 * 8 | Special | CSS | Status | CMD | CSO | Length |
3378 * +--------------------------------------------------------------+
3379 * 63 48 47 36 35 32 31 24 23 16 15 0
3380 *
3381 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382 * 63 48 47 40 39 32 31 16 15 8 7 0
3383 * +----------------------------------------------------------------+
3384 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3385 * +----------------------------------------------------------------+
3386 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3387 * +----------------------------------------------------------------+
3388 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3389 *
3390 * Extended Data Descriptor (DTYP=0x1)
3391 * +----------------------------------------------------------------+
3392 * 0 | Buffer Address [63:0] |
3393 * +----------------------------------------------------------------+
3394 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3395 * +----------------------------------------------------------------+
3396 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3397 */
3398 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3400
3401 if (!netif_msg_tx_done(adapter))
3402 goto rx_ring_summary;
3403
3404 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3407 struct my_u { __le64 a; __le64 b; };
3408 struct my_u *u = (struct my_u *)tx_desc;
3409 const char *type;
3410
3411 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412 type = "NTC/U";
3413 else if (i == tx_ring->next_to_use)
3414 type = "NTU";
3415 else if (i == tx_ring->next_to_clean)
3416 type = "NTC";
3417 else
3418 type = "";
3419
3420 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3421 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422 le64_to_cpu(u->a), le64_to_cpu(u->b),
3423 (u64)buffer_info->dma, buffer_info->length,
3424 buffer_info->next_to_watch,
3425 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3426 }
3427
3428rx_ring_summary:
3429 /* receive dump */
3430 pr_info("\nRX Desc ring dump\n");
3431
3432 /* Legacy Receive Descriptor Format
3433 *
3434 * +-----------------------------------------------------+
3435 * | Buffer Address [63:0] |
3436 * +-----------------------------------------------------+
3437 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3438 * +-----------------------------------------------------+
3439 * 63 48 47 40 39 32 31 16 15 0
3440 */
3441 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3442
3443 if (!netif_msg_rx_status(adapter))
3444 goto exit;
3445
3446 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3447 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3448 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3449 struct my_u { __le64 a; __le64 b; };
3450 struct my_u *u = (struct my_u *)rx_desc;
3451 const char *type;
3452
3453 if (i == rx_ring->next_to_use)
3454 type = "NTU";
3455 else if (i == rx_ring->next_to_clean)
3456 type = "NTC";
3457 else
3458 type = "";
3459
3460 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3461 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3462 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3463 } /* for */
3464
3465 /* dump the descriptor caches */
3466 /* rx */
3467 pr_info("Rx descriptor cache in 64bit format\n");
3468 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3469 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470 i,
3471 readl(adapter->hw.hw_addr + i+4),
3472 readl(adapter->hw.hw_addr + i),
3473 readl(adapter->hw.hw_addr + i+12),
3474 readl(adapter->hw.hw_addr + i+8));
3475 }
3476 /* tx */
3477 pr_info("Tx descriptor cache in 64bit format\n");
3478 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3479 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480 i,
3481 readl(adapter->hw.hw_addr + i+4),
3482 readl(adapter->hw.hw_addr + i),
3483 readl(adapter->hw.hw_addr + i+12),
3484 readl(adapter->hw.hw_addr + i+8));
3485 }
3486exit:
3487 return;
3488}
3489
3490/**
3491 * e1000_tx_timeout - Respond to a Tx Hang
3492 * @netdev: network interface device structure
3493 * @txqueue: number of the Tx queue that hung (unused)
3494 **/
3495static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496{
3497 struct e1000_adapter *adapter = netdev_priv(netdev);
3498
3499 /* Do the reset outside of interrupt context */
3500 adapter->tx_timeout_count++;
3501 schedule_work(&adapter->reset_task);
3502}
3503
3504static void e1000_reset_task(struct work_struct *work)
3505{
3506 struct e1000_adapter *adapter =
3507 container_of(work, struct e1000_adapter, reset_task);
3508
3509 e_err(drv, "Reset adapter\n");
3510 e1000_reinit_locked(adapter);
3511}
3512
3513/**
3514 * e1000_change_mtu - Change the Maximum Transfer Unit
3515 * @netdev: network interface device structure
3516 * @new_mtu: new value for maximum frame size
3517 *
3518 * Returns 0 on success, negative on failure
3519 **/
3520static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3521{
3522 struct e1000_adapter *adapter = netdev_priv(netdev);
3523 struct e1000_hw *hw = &adapter->hw;
3524 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
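	/* e.g. the standard 1500-byte MTU gives 1500 + 14 + 4 = 1518 */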
3525
3526 /* Adapter-specific max frame size limits. */
3527 switch (hw->mac_type) {
3528 case e1000_undefined ... e1000_82542_rev2_1:
3529 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3530 e_err(probe, "Jumbo Frames not supported.\n");
3531 return -EINVAL;
3532 }
3533 break;
3534 default:
3535 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3536 break;
3537 }
3538
3539 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540 msleep(1);
3541 /* e1000_down has a dependency on max_frame_size */
3542 hw->max_frame_size = max_frame;
3543 if (netif_running(netdev)) {
3544 /* prevent buffers from being reallocated */
3545 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3546 e1000_down(adapter);
3547 }
3548
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more; this pushes us to allocate from the next
	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
	 * However, with the new *_jumbo_rx* routines, jumbo receives use
	 * fragmented skbs.
	 */
3556
3557 if (max_frame <= E1000_RXBUFFER_2048)
3558 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3559 else
3560#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3561 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3562#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3563 adapter->rx_buffer_len = PAGE_SIZE;
3564#endif
3565
3566 /* adjust allocation if LPE protects us, and we aren't using SBP */
3567 if (!hw->tbi_compatibility_on &&
3568 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3569 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3570 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3571
3572 netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573 netdev->mtu, new_mtu);
3574 netdev->mtu = new_mtu;
3575
3576 if (netif_running(netdev))
3577 e1000_up(adapter);
3578 else
3579 e1000_reset(adapter);
3580
3581 clear_bit(__E1000_RESETTING, &adapter->flags);
3582
3583 return 0;
3584}
3585
3586/**
3587 * e1000_update_stats - Update the board statistics counters
3588 * @adapter: board private structure
3589 **/
3590void e1000_update_stats(struct e1000_adapter *adapter)
3591{
3592 struct net_device *netdev = adapter->netdev;
3593 struct e1000_hw *hw = &adapter->hw;
3594 struct pci_dev *pdev = adapter->pdev;
3595 unsigned long flags;
3596 u16 phy_tmp;
3597
3598#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3599
3600 /* Prevent stats update while adapter is being reset, or if the pci
3601 * connection is down.
3602 */
3603 if (adapter->link_speed == 0)
3604 return;
3605 if (pci_channel_offline(pdev))
3606 return;
3607
3608 spin_lock_irqsave(&adapter->stats_lock, flags);
3609
3610 /* these counters are modified from e1000_tbi_adjust_stats,
3611 * called from the interrupt context, so they must only
3612 * be written while holding adapter->stats_lock
3613 */
3614
3615 adapter->stats.crcerrs += er32(CRCERRS);
3616 adapter->stats.gprc += er32(GPRC);
3617 adapter->stats.gorcl += er32(GORCL);
3618 adapter->stats.gorch += er32(GORCH);
3619 adapter->stats.bprc += er32(BPRC);
3620 adapter->stats.mprc += er32(MPRC);
3621 adapter->stats.roc += er32(ROC);
3622
3623 adapter->stats.prc64 += er32(PRC64);
3624 adapter->stats.prc127 += er32(PRC127);
3625 adapter->stats.prc255 += er32(PRC255);
3626 adapter->stats.prc511 += er32(PRC511);
3627 adapter->stats.prc1023 += er32(PRC1023);
3628 adapter->stats.prc1522 += er32(PRC1522);
3629
3630 adapter->stats.symerrs += er32(SYMERRS);
3631 adapter->stats.mpc += er32(MPC);
3632 adapter->stats.scc += er32(SCC);
3633 adapter->stats.ecol += er32(ECOL);
3634 adapter->stats.mcc += er32(MCC);
3635 adapter->stats.latecol += er32(LATECOL);
3636 adapter->stats.dc += er32(DC);
3637 adapter->stats.sec += er32(SEC);
3638 adapter->stats.rlec += er32(RLEC);
3639 adapter->stats.xonrxc += er32(XONRXC);
3640 adapter->stats.xontxc += er32(XONTXC);
3641 adapter->stats.xoffrxc += er32(XOFFRXC);
3642 adapter->stats.xofftxc += er32(XOFFTXC);
3643 adapter->stats.fcruc += er32(FCRUC);
3644 adapter->stats.gptc += er32(GPTC);
3645 adapter->stats.gotcl += er32(GOTCL);
3646 adapter->stats.gotch += er32(GOTCH);
3647 adapter->stats.rnbc += er32(RNBC);
3648 adapter->stats.ruc += er32(RUC);
3649 adapter->stats.rfc += er32(RFC);
3650 adapter->stats.rjc += er32(RJC);
3651 adapter->stats.torl += er32(TORL);
3652 adapter->stats.torh += er32(TORH);
3653 adapter->stats.totl += er32(TOTL);
3654 adapter->stats.toth += er32(TOTH);
3655 adapter->stats.tpr += er32(TPR);
3656
3657 adapter->stats.ptc64 += er32(PTC64);
3658 adapter->stats.ptc127 += er32(PTC127);
3659 adapter->stats.ptc255 += er32(PTC255);
3660 adapter->stats.ptc511 += er32(PTC511);
3661 adapter->stats.ptc1023 += er32(PTC1023);
3662 adapter->stats.ptc1522 += er32(PTC1522);
3663
3664 adapter->stats.mptc += er32(MPTC);
3665 adapter->stats.bptc += er32(BPTC);
3666
3667 /* used for adaptive IFS */
3668
3669 hw->tx_packet_delta = er32(TPT);
3670 adapter->stats.tpt += hw->tx_packet_delta;
3671 hw->collision_delta = er32(COLC);
3672 adapter->stats.colc += hw->collision_delta;
3673
3674 if (hw->mac_type >= e1000_82543) {
3675 adapter->stats.algnerrc += er32(ALGNERRC);
3676 adapter->stats.rxerrc += er32(RXERRC);
3677 adapter->stats.tncrs += er32(TNCRS);
3678 adapter->stats.cexterr += er32(CEXTERR);
3679 adapter->stats.tsctc += er32(TSCTC);
3680 adapter->stats.tsctfc += er32(TSCTFC);
3681 }
3682
3683 /* Fill out the OS statistics structure */
3684 netdev->stats.multicast = adapter->stats.mprc;
3685 netdev->stats.collisions = adapter->stats.colc;
3686
3687 /* Rx Errors */
3688
3689 /* RLEC on some newer hardware can be incorrect so build
3690 * our own version based on RUC and ROC
3691 */
3692 netdev->stats.rx_errors = adapter->stats.rxerrc +
3693 adapter->stats.crcerrs + adapter->stats.algnerrc +
3694 adapter->stats.ruc + adapter->stats.roc +
3695 adapter->stats.cexterr;
3696 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3697 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3698 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3699 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3700 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3701
3702 /* Tx Errors */
3703 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3704 netdev->stats.tx_errors = adapter->stats.txerrc;
3705 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3706 netdev->stats.tx_window_errors = adapter->stats.latecol;
3707 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3708 if (hw->bad_tx_carr_stats_fd &&
3709 adapter->link_duplex == FULL_DUPLEX) {
3710 netdev->stats.tx_carrier_errors = 0;
3711 adapter->stats.tncrs = 0;
3712 }
3713
3714 /* Tx Dropped needs to be maintained elsewhere */
3715
3716 /* Phy Stats */
3717 if (hw->media_type == e1000_media_type_copper) {
3718 if ((adapter->link_speed == SPEED_1000) &&
3719 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3721 adapter->phy_stats.idle_errors += phy_tmp;
3722 }
3723
3724 if ((hw->mac_type <= e1000_82546) &&
3725 (hw->phy_type == e1000_phy_m88) &&
3726 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727 adapter->phy_stats.receive_errors += phy_tmp;
3728 }
3729
3730 /* Management Stats */
3731 if (hw->has_smbus) {
3732 adapter->stats.mgptc += er32(MGTPTC);
3733 adapter->stats.mgprc += er32(MGTPRC);
3734 adapter->stats.mgpdc += er32(MGTPDC);
3735 }
3736
3737 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738}
3739
3740/**
3741 * e1000_intr - Interrupt Handler
3742 * @irq: interrupt number
3743 * @data: pointer to a network interface device structure
3744 **/
3745static irqreturn_t e1000_intr(int irq, void *data)
3746{
3747 struct net_device *netdev = data;
3748 struct e1000_adapter *adapter = netdev_priv(netdev);
3749 struct e1000_hw *hw = &adapter->hw;
3750 u32 icr = er32(ICR);
3751
if (unlikely(!icr))
3753 return IRQ_NONE; /* Not our interrupt */
3754
/* we might have caused the interrupt, but the above read cleared it;
 * and if the driver is down there is nothing to do, so return handled
 */
3759 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760 return IRQ_HANDLED;
3761
3762 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763 hw->get_link_status = 1;
3764 /* guard against interrupt when we're going down */
3765 if (!test_bit(__E1000_DOWN, &adapter->flags))
3766 schedule_delayed_work(&adapter->watchdog_task, 1);
3767 }
3768
3769 /* disable interrupts, without the synchronize_irq bit */
3770 ew32(IMC, ~0);
3771 E1000_WRITE_FLUSH();
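/* Writing ~0 to IMC masks every interrupt cause; interrupts stay off
 * until e1000_clean() finishes polling and calls e1000_irq_enable().
 */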
3772
3773 if (likely(napi_schedule_prep(&adapter->napi))) {
3774 adapter->total_tx_bytes = 0;
3775 adapter->total_tx_packets = 0;
3776 adapter->total_rx_bytes = 0;
3777 adapter->total_rx_packets = 0;
3778 __napi_schedule(&adapter->napi);
3779 } else {
3780 /* this really should not happen! if it does it is basically a
3781 * bug, but not a hard error, so enable ints and continue
3782 */
3783 if (!test_bit(__E1000_DOWN, &adapter->flags))
3784 e1000_irq_enable(adapter);
3785 }
3786
3787 return IRQ_HANDLED;
3788}
3789
3790/**
3791 * e1000_clean - NAPI Rx polling callback
3792 * @napi: napi struct containing references to driver info
3793 * @budget: budget given to driver for receive packets
3794 **/
3795static int e1000_clean(struct napi_struct *napi, int budget)
3796{
3797 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3798 napi);
3799 int tx_clean_complete = 0, work_done = 0;
3800
3801 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802
3803 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804
3805 if (!tx_clean_complete || work_done == budget)
3806 return budget;
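/* Returning the full budget tells the NAPI core that work remains, so it
 * keeps polling without re-enabling the device interrupt.
 */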
3807
3808 /* Exit the polling mode, but don't re-enable interrupts if stack might
3809 * poll us due to busy-polling
3810 */
3811 if (likely(napi_complete_done(napi, work_done))) {
3812 if (likely(adapter->itr_setting & 3))
3813 e1000_set_itr(adapter);
3814 if (!test_bit(__E1000_DOWN, &adapter->flags))
3815 e1000_irq_enable(adapter);
3816 }
3817
3818 return work_done;
3819}
3820
3821/**
3822 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3823 * @adapter: board private structure
3824 * @tx_ring: ring to clean
3825 **/
3826static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3827 struct e1000_tx_ring *tx_ring)
3828{
3829 struct e1000_hw *hw = &adapter->hw;
3830 struct net_device *netdev = adapter->netdev;
3831 struct e1000_tx_desc *tx_desc, *eop_desc;
3832 struct e1000_tx_buffer *buffer_info;
3833 unsigned int i, eop;
3834 unsigned int count = 0;
3835 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3836 unsigned int bytes_compl = 0, pkts_compl = 0;
3837
3838 i = tx_ring->next_to_clean;
3839 eop = tx_ring->buffer_info[i].next_to_watch;
3840 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3841
3842 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3843 (count < tx_ring->count)) {
3844 bool cleaned = false;
3845 dma_rmb(); /* read buffer_info after eop_desc */
3846 for ( ; !cleaned; count++) {
3847 tx_desc = E1000_TX_DESC(*tx_ring, i);
3848 buffer_info = &tx_ring->buffer_info[i];
3849 cleaned = (i == eop);
3850
3851 if (cleaned) {
3852 total_tx_packets += buffer_info->segs;
3853 total_tx_bytes += buffer_info->bytecount;
3854 if (buffer_info->skb) {
3855 bytes_compl += buffer_info->skb->len;
3856 pkts_compl++;
3857 }
3858
3859 }
3860 e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3861 64);
3862 tx_desc->upper.data = 0;
3863
3864 if (unlikely(++i == tx_ring->count))
3865 i = 0;
3866 }
3867
3868 eop = tx_ring->buffer_info[i].next_to_watch;
3869 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870 }
3871
3872 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3873 * which will reuse the cleaned buffers.
3874 */
3875 smp_store_release(&tx_ring->next_to_clean, i);
3876
3877 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3878
3879#define TX_WAKE_THRESHOLD 32
3880 if (unlikely(count && netif_carrier_ok(netdev) &&
3881 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3882 /* Make sure that anybody stopping the queue after this
3883 * sees the new next_to_clean.
3884 */
3885 smp_mb();
3886
3887 if (netif_queue_stopped(netdev) &&
3888 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3889 netif_wake_queue(netdev);
3890 ++adapter->restart_queue;
3891 }
3892 }
3893
3894 if (adapter->detect_tx_hung) {
3895 /* Detect a transmit hang in hardware, this serializes the
3896 * check with the clearing of time_stamp and movement of i
3897 */
3898 adapter->detect_tx_hung = false;
3899 if (tx_ring->buffer_info[eop].time_stamp &&
3900 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3901 (adapter->tx_timeout_factor * HZ)) &&
3902 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3903
3904 /* detected Tx unit hang */
3905 e_err(drv, "Detected Tx Unit Hang\n"
3906 " Tx Queue <%lu>\n"
3907 " TDH <%x>\n"
3908 " TDT <%x>\n"
3909 " next_to_use <%x>\n"
3910 " next_to_clean <%x>\n"
3911 "buffer_info[next_to_clean]\n"
3912 " time_stamp <%lx>\n"
3913 " next_to_watch <%x>\n"
3914 " jiffies <%lx>\n"
3915 " next_to_watch.status <%x>\n",
3916 (unsigned long)(tx_ring - adapter->tx_ring),
3917 readl(hw->hw_addr + tx_ring->tdh),
3918 readl(hw->hw_addr + tx_ring->tdt),
3919 tx_ring->next_to_use,
3920 tx_ring->next_to_clean,
3921 tx_ring->buffer_info[eop].time_stamp,
3922 eop,
3923 jiffies,
3924 eop_desc->upper.fields.status);
3925 e1000_dump(adapter);
3926 netif_stop_queue(netdev);
3927 }
3928 }
3929 adapter->total_tx_bytes += total_tx_bytes;
3930 adapter->total_tx_packets += total_tx_packets;
3931 netdev->stats.tx_bytes += total_tx_bytes;
3932 netdev->stats.tx_packets += total_tx_packets;
3933 return count < tx_ring->count;
3934}
3935
3936/**
3937 * e1000_rx_checksum - Receive Checksum Offload for 82543
3938 * @adapter: board private structure
3939 * @status_err: receive descriptor status and error fields
3940 * @csum: receive descriptor csum field
3941 * @skb: socket buffer with received data
3942 **/
3943static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3944 u32 csum, struct sk_buff *skb)
3945{
3946 struct e1000_hw *hw = &adapter->hw;
3947 u16 status = (u16)status_err;
3948 u8 errors = (u8)(status_err >> 24);
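	/* Callers pack the descriptor fields as
	 * (u32)status | ((u32)errors << 24), so the status byte sits in the
	 * low bits and the error byte in bits 31:24; unpack them here.
	 */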
3949
3950 skb_checksum_none_assert(skb);
3951
3952 /* 82543 or newer only */
3953 if (unlikely(hw->mac_type < e1000_82543))
3954 return;
3955 /* Ignore Checksum bit is set */
3956 if (unlikely(status & E1000_RXD_STAT_IXSM))
3957 return;
3958 /* TCP/UDP checksum error bit is set */
3959 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3960 /* let the stack verify checksum errors */
3961 adapter->hw_csum_err++;
3962 return;
3963 }
3964 /* TCP/UDP Checksum has not been calculated */
3965 if (!(status & E1000_RXD_STAT_TCPCS))
3966 return;
3967
/* It must be a TCP or UDP packet with a valid checksum; the
 * E1000_RXD_STAT_TCPCS test above already guarantees that here.
 */
skb->ip_summed = CHECKSUM_UNNECESSARY;
3973 adapter->hw_csum_good++;
3974}
3975
3976/**
3977 * e1000_consume_page - helper function for jumbo Rx path
3978 * @bi: software descriptor shadow data
3979 * @skb: skb being modified
3980 * @length: length of data being added
3981 **/
3982static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3983 u16 length)
3984{
3985 bi->rxbuf.page = NULL;
3986 skb->len += length;
3987 skb->data_len += length;
3988 skb->truesize += PAGE_SIZE;
3989}
3990
3991/**
3992 * e1000_receive_skb - helper function to handle rx indications
3993 * @adapter: board private structure
3994 * @status: descriptor status field as written by hardware
3995 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3996 * @skb: pointer to sk_buff to be indicated to stack
3997 */
3998static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3999 __le16 vlan, struct sk_buff *skb)
4000{
4001 skb->protocol = eth_type_trans(skb, adapter->netdev);
4002
4003 if (status & E1000_RXD_STAT_VP) {
4004 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4005
4006 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007 }
4008 napi_gro_receive(&adapter->napi, skb);
4009}
4010
4011/**
 * e1000_tbi_adjust_stats - adjust stats counters for a TBI-accepted frame
 * @hw: Struct containing variables accessed by shared code
 * @stats: pointer to the stats struct
4015 * @frame_len: The length of the frame in question
4016 * @mac_addr: The Ethernet destination address of the frame in question
4017 *
4018 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4019 */
4020static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4021 struct e1000_hw_stats *stats,
4022 u32 frame_len, const u8 *mac_addr)
4023{
4024 u64 carry_bit;
4025
4026 /* First adjust the frame length. */
4027 frame_len--;
4028 /* We need to adjust the statistics counters, since the hardware
4029 * counters overcount this packet as a CRC error and undercount
4030 * the packet as a good packet
4031 */
4032 /* This packet should not be counted as a CRC error. */
4033 stats->crcerrs--;
4034 /* This packet does count as a Good Packet Received. */
4035 stats->gprc++;
4036
4037 /* Adjust the Good Octets received counters */
4038 carry_bit = 0x80000000 & stats->gorcl;
4039 stats->gorcl += frame_len;
4040 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4041 * Received Count) was one before the addition,
4042 * AND it is zero after, then we lost the carry out,
4043 * need to add one to Gorch (Good Octets Received Count High).
4044 * This could be simplified if all environments supported
4045 * 64-bit integers.
4046 */
4047 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4048 stats->gorch++;
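/* Worked example: gorcl = 0xFFFFFF80 plus frame_len = 0x100 wraps to
 * 0x00000080; the high bit was set before the add and clear after, so
 * the lost carry is restored by the gorch increment above.
 */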
4049 /* Is this a broadcast or multicast? Check broadcast first,
4050 * since the test for a multicast frame will test positive on
4051 * a broadcast frame.
4052 */
4053 if (is_broadcast_ether_addr(mac_addr))
4054 stats->bprc++;
4055 else if (is_multicast_ether_addr(mac_addr))
4056 stats->mprc++;
4057
4058 if (frame_len == hw->max_frame_size) {
4059 /* In this case, the hardware has overcounted the number of
4060 * oversize frames.
4061 */
4062 if (stats->roc > 0)
4063 stats->roc--;
4064 }
4065
4066 /* Adjust the bin counters when the extra byte put the frame in the
4067 * wrong bin. Remember that the frame_len was adjusted above.
4068 */
4069 if (frame_len == 64) {
4070 stats->prc64++;
4071 stats->prc127--;
4072 } else if (frame_len == 127) {
4073 stats->prc127++;
4074 stats->prc255--;
4075 } else if (frame_len == 255) {
4076 stats->prc255++;
4077 stats->prc511--;
4078 } else if (frame_len == 511) {
4079 stats->prc511++;
4080 stats->prc1023--;
4081 } else if (frame_len == 1023) {
4082 stats->prc1023++;
4083 stats->prc1522--;
4084 } else if (frame_len == 1522) {
4085 stats->prc1522++;
4086 }
4087}
4088
4089static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4090 u8 status, u8 errors,
4091 u32 length, const u8 *data)
4092{
4093 struct e1000_hw *hw = &adapter->hw;
4094 u8 last_byte = *(data + length - 1);
4095
4096 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4097 unsigned long irq_flags;
4098
4099 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4100 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4101 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4102
4103 return true;
4104 }
4105
4106 return false;
4107}
4108
4109static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4110 unsigned int bufsz)
4111{
4112 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4113
4114 if (unlikely(!skb))
4115 adapter->alloc_rx_buff_failed++;
4116 return skb;
4117}
4118
4119/**
4120 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4121 * @adapter: board private structure
4122 * @rx_ring: ring to clean
4123 * @work_done: amount of napi work completed this call
4124 * @work_to_do: max amount of work allowed for this call to do
4125 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
4128 */
4129static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4130 struct e1000_rx_ring *rx_ring,
4131 int *work_done, int work_to_do)
4132{
4133 struct net_device *netdev = adapter->netdev;
4134 struct pci_dev *pdev = adapter->pdev;
4135 struct e1000_rx_desc *rx_desc, *next_rxd;
4136 struct e1000_rx_buffer *buffer_info, *next_buffer;
4137 u32 length;
4138 unsigned int i;
4139 int cleaned_count = 0;
4140 bool cleaned = false;
4141 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4142
4143 i = rx_ring->next_to_clean;
4144 rx_desc = E1000_RX_DESC(*rx_ring, i);
4145 buffer_info = &rx_ring->buffer_info[i];
4146
4147 while (rx_desc->status & E1000_RXD_STAT_DD) {
4148 struct sk_buff *skb;
4149 u8 status;
4150
4151 if (*work_done >= work_to_do)
4152 break;
4153 (*work_done)++;
4154 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4155
4156 status = rx_desc->status;
4157
4158 if (++i == rx_ring->count)
4159 i = 0;
4160
4161 next_rxd = E1000_RX_DESC(*rx_ring, i);
4162 prefetch(next_rxd);
4163
4164 next_buffer = &rx_ring->buffer_info[i];
4165
4166 cleaned = true;
4167 cleaned_count++;
4168 dma_unmap_page(&pdev->dev, buffer_info->dma,
4169 adapter->rx_buffer_len, DMA_FROM_DEVICE);
4170 buffer_info->dma = 0;
4171
4172 length = le16_to_cpu(rx_desc->length);
4173
4174 /* errors is only valid for DD + EOP descriptors */
4175 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4176 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4177 u8 *mapped = page_address(buffer_info->rxbuf.page);
4178
4179 if (e1000_tbi_should_accept(adapter, status,
4180 rx_desc->errors,
4181 length, mapped)) {
4182 length--;
4183 } else if (netdev->features & NETIF_F_RXALL) {
4184 goto process_skb;
4185 } else {
4186 /* an error means any chain goes out the window
4187 * too
4188 */
4189 dev_kfree_skb(rx_ring->rx_skb_top);
4190 rx_ring->rx_skb_top = NULL;
4191 goto next_desc;
4192 }
4193 }
4194
4195#define rxtop rx_ring->rx_skb_top
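/* 'rxtop' names the skb that heads a multi-descriptor chain; it lives in
 * the ring structure so a partially assembled jumbo frame survives across
 * NAPI poll calls until its EOP descriptor arrives.
 */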
4196process_skb:
4197 if (!(status & E1000_RXD_STAT_EOP)) {
4198 /* this descriptor is only the beginning (or middle) */
4199 if (!rxtop) {
4200 /* this is the beginning of a chain */
4201 rxtop = napi_get_frags(&adapter->napi);
4202 if (!rxtop)
4203 break;
4204
4205 skb_fill_page_desc(rxtop, 0,
4206 buffer_info->rxbuf.page,
4207 0, length);
4208 } else {
4209 /* this is the middle of a chain */
4210 skb_fill_page_desc(rxtop,
4211 skb_shinfo(rxtop)->nr_frags,
4212 buffer_info->rxbuf.page, 0, length);
4213 }
4214 e1000_consume_page(buffer_info, rxtop, length);
4215 goto next_desc;
4216 } else {
4217 if (rxtop) {
4218 /* end of the chain */
4219 skb_fill_page_desc(rxtop,
4220 skb_shinfo(rxtop)->nr_frags,
4221 buffer_info->rxbuf.page, 0, length);
4222 skb = rxtop;
4223 rxtop = NULL;
4224 e1000_consume_page(buffer_info, skb, length);
4225 } else {
4226 struct page *p;
4227 /* no chain, got EOP, this buf is the packet
4228 * copybreak to save the put_page/alloc_page
4229 */
4230 p = buffer_info->rxbuf.page;
4231 if (length <= copybreak) {
4232 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4233 length -= 4;
4234 skb = e1000_alloc_rx_skb(adapter,
4235 length);
4236 if (!skb)
4237 break;
4238
4239 memcpy(skb_tail_pointer(skb),
4240 page_address(p), length);
4241
4242 /* re-use the page, so don't erase
4243 * buffer_info->rxbuf.page
4244 */
4245 skb_put(skb, length);
4246 e1000_rx_checksum(adapter,
4247 status | rx_desc->errors << 24,
4248 le16_to_cpu(rx_desc->csum), skb);
4249
4250 total_rx_bytes += skb->len;
4251 total_rx_packets++;
4252
4253 e1000_receive_skb(adapter, status,
4254 rx_desc->special, skb);
4255 goto next_desc;
4256 } else {
4257 skb = napi_get_frags(&adapter->napi);
4258 if (!skb) {
4259 adapter->alloc_rx_buff_failed++;
4260 break;
4261 }
4262 skb_fill_page_desc(skb, 0, p, 0,
4263 length);
4264 e1000_consume_page(buffer_info, skb,
4265 length);
4266 }
4267 }
4268 }
4269
4270 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4271 e1000_rx_checksum(adapter,
4272 (u32)(status) |
4273 ((u32)(rx_desc->errors) << 24),
4274 le16_to_cpu(rx_desc->csum), skb);
4275
4276 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4277 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4278 pskb_trim(skb, skb->len - 4);
4279 total_rx_packets++;
4280
4281 if (status & E1000_RXD_STAT_VP) {
4282 __le16 vlan = rx_desc->special;
4283 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4284
4285 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4286 }
4287
4288 napi_gro_frags(&adapter->napi);
4289
4290next_desc:
4291 rx_desc->status = 0;
4292
4293 /* return some buffers to hardware, one at a time is too slow */
4294 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4295 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4296 cleaned_count = 0;
4297 }
4298
4299 /* use prefetched values */
4300 rx_desc = next_rxd;
4301 buffer_info = next_buffer;
4302 }
4303 rx_ring->next_to_clean = i;
4304
4305 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4306 if (cleaned_count)
4307 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4308
4309 adapter->total_rx_packets += total_rx_packets;
4310 adapter->total_rx_bytes += total_rx_bytes;
4311 netdev->stats.rx_bytes += total_rx_bytes;
4312 netdev->stats.rx_packets += total_rx_packets;
4313 return cleaned;
4314}
4315
4316/* this should improve performance for small packets with large amounts
4317 * of reassembly being done in the stack
4318 */
4319static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4320 struct e1000_rx_buffer *buffer_info,
4321 u32 length, const void *data)
4322{
4323 struct sk_buff *skb;
4324
4325 if (length > copybreak)
4326 return NULL;
4327
4328 skb = e1000_alloc_rx_skb(adapter, length);
4329 if (!skb)
4330 return NULL;
4331
4332 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4333 length, DMA_FROM_DEVICE);
4334
4335 skb_put_data(skb, data, length);
4336
4337 return skb;
4338}
4339
4340/**
4341 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4342 * @adapter: board private structure
4343 * @rx_ring: ring to clean
4344 * @work_done: amount of napi work completed this call
4345 * @work_to_do: max amount of work allowed for this call to do
4346 */
4347static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4348 struct e1000_rx_ring *rx_ring,
4349 int *work_done, int work_to_do)
4350{
4351 struct net_device *netdev = adapter->netdev;
4352 struct pci_dev *pdev = adapter->pdev;
4353 struct e1000_rx_desc *rx_desc, *next_rxd;
4354 struct e1000_rx_buffer *buffer_info, *next_buffer;
4355 u32 length;
4356 unsigned int i;
4357 int cleaned_count = 0;
4358 bool cleaned = false;
4359 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4360
4361 i = rx_ring->next_to_clean;
4362 rx_desc = E1000_RX_DESC(*rx_ring, i);
4363 buffer_info = &rx_ring->buffer_info[i];
4364
4365 while (rx_desc->status & E1000_RXD_STAT_DD) {
4366 struct sk_buff *skb;
4367 u8 *data;
4368 u8 status;
4369
4370 if (*work_done >= work_to_do)
4371 break;
4372 (*work_done)++;
4373 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4374
4375 status = rx_desc->status;
4376 length = le16_to_cpu(rx_desc->length);
4377
4378 data = buffer_info->rxbuf.data;
4379 prefetch(data);
4380 skb = e1000_copybreak(adapter, buffer_info, length, data);
4381 if (!skb) {
4382 unsigned int frag_len = e1000_frag_len(adapter);
4383
4384 skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4385 if (!skb) {
4386 adapter->alloc_rx_buff_failed++;
4387 break;
4388 }
4389
4390 skb_reserve(skb, E1000_HEADROOM);
4391 dma_unmap_single(&pdev->dev, buffer_info->dma,
4392 adapter->rx_buffer_len,
4393 DMA_FROM_DEVICE);
4394 buffer_info->dma = 0;
4395 buffer_info->rxbuf.data = NULL;
4396 }
4397
4398 if (++i == rx_ring->count)
4399 i = 0;
4400
4401 next_rxd = E1000_RX_DESC(*rx_ring, i);
4402 prefetch(next_rxd);
4403
4404 next_buffer = &rx_ring->buffer_info[i];
4405
4406 cleaned = true;
4407 cleaned_count++;
4408
4409 /* !EOP means multiple descriptors were used to store a single
 * packet, if that's the case we need to toss it. In fact, we need
 * to toss every packet with the EOP bit clear and the next
4412 * frame that _does_ have the EOP bit set, as it is by
4413 * definition only a frame fragment
4414 */
4415 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4416 adapter->discarding = true;
4417
4418 if (adapter->discarding) {
4419 /* All receives must fit into a single buffer */
4420 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4421 dev_kfree_skb(skb);
4422 if (status & E1000_RXD_STAT_EOP)
4423 adapter->discarding = false;
4424 goto next_desc;
4425 }
4426
4427 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4428 if (e1000_tbi_should_accept(adapter, status,
4429 rx_desc->errors,
4430 length, data)) {
4431 length--;
4432 } else if (netdev->features & NETIF_F_RXALL) {
4433 goto process_skb;
4434 } else {
4435 dev_kfree_skb(skb);
4436 goto next_desc;
4437 }
4438 }
4439
4440process_skb:
4441 total_rx_bytes += (length - 4); /* don't count FCS */
4442 total_rx_packets++;
4443
4444 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4445 /* adjust length to remove Ethernet CRC, this must be
4446 * done after the TBI_ACCEPT workaround above
4447 */
4448 length -= 4;
4449
4450 if (buffer_info->rxbuf.data == NULL)
4451 skb_put(skb, length);
4452 else /* copybreak skb */
4453 skb_trim(skb, length);
4454
4455 /* Receive Checksum Offload */
4456 e1000_rx_checksum(adapter,
4457 (u32)(status) |
4458 ((u32)(rx_desc->errors) << 24),
4459 le16_to_cpu(rx_desc->csum), skb);
4460
4461 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4462
4463next_desc:
4464 rx_desc->status = 0;
4465
4466 /* return some buffers to hardware, one at a time is too slow */
4467 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4468 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4469 cleaned_count = 0;
4470 }
4471
4472 /* use prefetched values */
4473 rx_desc = next_rxd;
4474 buffer_info = next_buffer;
4475 }
4476 rx_ring->next_to_clean = i;
4477
4478 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4479 if (cleaned_count)
4480 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4481
4482 adapter->total_rx_packets += total_rx_packets;
4483 adapter->total_rx_bytes += total_rx_bytes;
4484 netdev->stats.rx_bytes += total_rx_bytes;
4485 netdev->stats.rx_packets += total_rx_packets;
4486 return cleaned;
4487}
4488
4489/**
4490 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4491 * @adapter: address of board private structure
4492 * @rx_ring: pointer to receive ring structure
4493 * @cleaned_count: number of buffers to allocate this pass
4494 **/
4495static void
4496e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4497 struct e1000_rx_ring *rx_ring, int cleaned_count)
4498{
4499 struct pci_dev *pdev = adapter->pdev;
4500 struct e1000_rx_desc *rx_desc;
4501 struct e1000_rx_buffer *buffer_info;
4502 unsigned int i;
4503
4504 i = rx_ring->next_to_use;
4505 buffer_info = &rx_ring->buffer_info[i];
4506
4507 while (cleaned_count--) {
4508 /* allocate a new page if necessary */
4509 if (!buffer_info->rxbuf.page) {
4510 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4511 if (unlikely(!buffer_info->rxbuf.page)) {
4512 adapter->alloc_rx_buff_failed++;
4513 break;
4514 }
4515 }
4516
4517 if (!buffer_info->dma) {
4518 buffer_info->dma = dma_map_page(&pdev->dev,
4519 buffer_info->rxbuf.page, 0,
4520 adapter->rx_buffer_len,
4521 DMA_FROM_DEVICE);
4522 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4523 put_page(buffer_info->rxbuf.page);
4524 buffer_info->rxbuf.page = NULL;
4525 buffer_info->dma = 0;
4526 adapter->alloc_rx_buff_failed++;
4527 break;
4528 }
4529 }
4530
4531 rx_desc = E1000_RX_DESC(*rx_ring, i);
4532 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4533
4534 if (unlikely(++i == rx_ring->count))
4535 i = 0;
4536 buffer_info = &rx_ring->buffer_info[i];
4537 }
4538
4539 if (likely(rx_ring->next_to_use != i)) {
4540 rx_ring->next_to_use = i;
4541 if (unlikely(i-- == 0))
4542 i = (rx_ring->count - 1);
4543
4544 /* Force memory writes to complete before letting h/w
4545 * know there are new descriptors to fetch. (Only
4546 * applicable for weak-ordered memory model archs,
4547 * such as IA-64).
4548 */
4549 dma_wmb();
4550 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4551 }
4552}
4553
4554/**
4555 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4556 * @adapter: address of board private structure
4557 * @rx_ring: pointer to ring struct
4558 * @cleaned_count: number of new Rx buffers to try to allocate
4559 **/
4560static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4561 struct e1000_rx_ring *rx_ring,
4562 int cleaned_count)
4563{
4564 struct e1000_hw *hw = &adapter->hw;
4565 struct pci_dev *pdev = adapter->pdev;
4566 struct e1000_rx_desc *rx_desc;
4567 struct e1000_rx_buffer *buffer_info;
4568 unsigned int i;
4569 unsigned int bufsz = adapter->rx_buffer_len;
4570
4571 i = rx_ring->next_to_use;
4572 buffer_info = &rx_ring->buffer_info[i];
4573
4574 while (cleaned_count--) {
4575 void *data;
4576
4577 if (buffer_info->rxbuf.data)
4578 goto skip;
4579
4580 data = e1000_alloc_frag(adapter);
4581 if (!data) {
4582 /* Better luck next round */
4583 adapter->alloc_rx_buff_failed++;
4584 break;
4585 }
4586
4587 /* Fix for errata 23, can't cross 64kB boundary */
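/* (Per the errata, receive DMA buffers that straddle a 64 KiB boundary
 * can be corrupted on the affected controllers, so such allocations are
 * rejected and retried below.)
 */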
4588 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4589 void *olddata = data;
e_err(rx_err, "skb align check failed: %u bytes at %p\n",
      bufsz, data);
4592 /* Try again, without freeing the previous */
4593 data = e1000_alloc_frag(adapter);
4594 /* Failed allocation, critical failure */
4595 if (!data) {
4596 skb_free_frag(olddata);
4597 adapter->alloc_rx_buff_failed++;
4598 break;
4599 }
4600
4601 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4602 /* give up */
4603 skb_free_frag(data);
4604 skb_free_frag(olddata);
4605 adapter->alloc_rx_buff_failed++;
4606 break;
4607 }
4608
4609 /* Use new allocation */
4610 skb_free_frag(olddata);
4611 }
4612 buffer_info->dma = dma_map_single(&pdev->dev,
4613 data,
4614 adapter->rx_buffer_len,
4615 DMA_FROM_DEVICE);
4616 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4617 skb_free_frag(data);
4618 buffer_info->dma = 0;
4619 adapter->alloc_rx_buff_failed++;
4620 break;
4621 }
4622
4623 /* XXX if it was allocated cleanly it will never map to a
4624 * boundary crossing
4625 */
4626
4627 /* Fix for errata 23, can't cross 64kB boundary */
4628 if (!e1000_check_64k_bound(adapter,
4629 (void *)(unsigned long)buffer_info->dma,
4630 adapter->rx_buffer_len)) {
e_err(rx_err, "dma align check failed: %u bytes at %p\n",
      adapter->rx_buffer_len,
      (void *)(unsigned long)buffer_info->dma);
4634
4635 dma_unmap_single(&pdev->dev, buffer_info->dma,
4636 adapter->rx_buffer_len,
4637 DMA_FROM_DEVICE);
4638
4639 skb_free_frag(data);
4640 buffer_info->rxbuf.data = NULL;
4641 buffer_info->dma = 0;
4642
4643 adapter->alloc_rx_buff_failed++;
4644 break;
4645 }
4646 buffer_info->rxbuf.data = data;
4647 skip:
4648 rx_desc = E1000_RX_DESC(*rx_ring, i);
4649 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4650
4651 if (unlikely(++i == rx_ring->count))
4652 i = 0;
4653 buffer_info = &rx_ring->buffer_info[i];
4654 }
4655
4656 if (likely(rx_ring->next_to_use != i)) {
4657 rx_ring->next_to_use = i;
4658 if (unlikely(i-- == 0))
4659 i = (rx_ring->count - 1);
4660
4661 /* Force memory writes to complete before letting h/w
4662 * know there are new descriptors to fetch. (Only
4663 * applicable for weak-ordered memory model archs,
4664 * such as IA-64).
4665 */
4666 dma_wmb();
4667 writel(i, hw->hw_addr + rx_ring->rdt);
4668 }
4669}
4670
4671/**
4672 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4673 * @adapter: address of board private structure
4674 **/
4675static void e1000_smartspeed(struct e1000_adapter *adapter)
4676{
4677 struct e1000_hw *hw = &adapter->hw;
4678 u16 phy_status;
4679 u16 phy_ctrl;
4680
4681 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4682 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4683 return;
4684
4685 if (adapter->smartspeed == 0) {
4686 /* If Master/Slave config fault is asserted twice,
4687 * we assume back-to-back
4688 */
4689 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4690 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4691 return;
4692 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4693 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4694 return;
4695 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4696 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4697 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4698 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4699 phy_ctrl);
4700 adapter->smartspeed++;
4701 if (!e1000_phy_setup_autoneg(hw) &&
4702 !e1000_read_phy_reg(hw, PHY_CTRL,
4703 &phy_ctrl)) {
4704 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4705 MII_CR_RESTART_AUTO_NEG);
4706 e1000_write_phy_reg(hw, PHY_CTRL,
4707 phy_ctrl);
4708 }
4709 }
4710 return;
4711 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4712 /* If still no link, perhaps using 2/3 pair cable */
4713 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4714 phy_ctrl |= CR_1000T_MS_ENABLE;
4715 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4716 if (!e1000_phy_setup_autoneg(hw) &&
4717 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4718 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4719 MII_CR_RESTART_AUTO_NEG);
4720 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4721 }
4722 }
4723 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4724 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4725 adapter->smartspeed = 0;
4726}
4727
4728/**
4729 * e1000_ioctl - handle ioctl calls
4730 * @netdev: pointer to our netdev
4731 * @ifr: pointer to interface request structure
4732 * @cmd: ioctl data
4733 **/
4734static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4735{
4736 switch (cmd) {
4737 case SIOCGMIIPHY:
4738 case SIOCGMIIREG:
4739 case SIOCSMIIREG:
4740 return e1000_mii_ioctl(netdev, ifr, cmd);
4741 default:
4742 return -EOPNOTSUPP;
4743 }
4744}
4745
4746/**
 * e1000_mii_ioctl - handle MII ioctl calls
4748 * @netdev: pointer to our netdev
4749 * @ifr: pointer to interface request structure
4750 * @cmd: ioctl data
4751 **/
4752static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4753 int cmd)
4754{
4755 struct e1000_adapter *adapter = netdev_priv(netdev);
4756 struct e1000_hw *hw = &adapter->hw;
4757 struct mii_ioctl_data *data = if_mii(ifr);
4758 int retval;
4759 u16 mii_reg;
4760 unsigned long flags;
4761
4762 if (hw->media_type != e1000_media_type_copper)
4763 return -EOPNOTSUPP;
4764
4765 switch (cmd) {
4766 case SIOCGMIIPHY:
4767 data->phy_id = hw->phy_addr;
4768 break;
4769 case SIOCGMIIREG:
4770 spin_lock_irqsave(&adapter->stats_lock, flags);
4771 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4772 &data->val_out)) {
4773 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4774 return -EIO;
4775 }
4776 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777 break;
4778 case SIOCSMIIREG:
4779 if (data->reg_num & ~(0x1F))
4780 return -EFAULT;
4781 mii_reg = data->val_in;
4782 spin_lock_irqsave(&adapter->stats_lock, flags);
4783 if (e1000_write_phy_reg(hw, data->reg_num,
4784 mii_reg)) {
4785 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4786 return -EIO;
4787 }
4788 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4789 if (hw->media_type == e1000_media_type_copper) {
4790 switch (data->reg_num) {
4791 case PHY_CTRL:
4792 if (mii_reg & MII_CR_POWER_DOWN)
4793 break;
4794 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4795 hw->autoneg = 1;
4796 hw->autoneg_advertised = 0x2F;
4797 } else {
4798 u32 speed;
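/* Decode the forced speed from the standard MII control
 * register bits: bit 6 (0x40) selects 1000 Mb/s, bit 13
 * (0x2000) selects 100 Mb/s, neither means 10 Mb/s, and
 * bit 8 (0x100) below selects full duplex.
 */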
4799 if (mii_reg & 0x40)
4800 speed = SPEED_1000;
4801 else if (mii_reg & 0x2000)
4802 speed = SPEED_100;
4803 else
4804 speed = SPEED_10;
4805 retval = e1000_set_spd_dplx(
4806 adapter, speed,
4807 ((mii_reg & 0x100)
4808 ? DUPLEX_FULL :
4809 DUPLEX_HALF));
4810 if (retval)
4811 return retval;
4812 }
4813 if (netif_running(adapter->netdev))
4814 e1000_reinit_locked(adapter);
4815 else
4816 e1000_reset(adapter);
4817 break;
4818 case M88E1000_PHY_SPEC_CTRL:
4819 case M88E1000_EXT_PHY_SPEC_CTRL:
4820 if (e1000_phy_reset(hw))
4821 return -EIO;
4822 break;
4823 }
4824 } else {
4825 switch (data->reg_num) {
4826 case PHY_CTRL:
4827 if (mii_reg & MII_CR_POWER_DOWN)
4828 break;
4829 if (netif_running(adapter->netdev))
4830 e1000_reinit_locked(adapter);
4831 else
4832 e1000_reset(adapter);
4833 break;
4834 }
4835 }
4836 break;
4837 default:
4838 return -EOPNOTSUPP;
4839 }
4840 return E1000_SUCCESS;
4841}
4842
4843void e1000_pci_set_mwi(struct e1000_hw *hw)
4844{
4845 struct e1000_adapter *adapter = hw->back;
4846 int ret_val = pci_set_mwi(adapter->pdev);
4847
4848 if (ret_val)
4849 e_err(probe, "Error in setting MWI\n");
4850}
4851
4852void e1000_pci_clear_mwi(struct e1000_hw *hw)
4853{
4854 struct e1000_adapter *adapter = hw->back;
4855
4856 pci_clear_mwi(adapter->pdev);
4857}
4858
4859int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4860{
4861 struct e1000_adapter *adapter = hw->back;
4862 return pcix_get_mmrbc(adapter->pdev);
4863}
4864
4865void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4866{
4867 struct e1000_adapter *adapter = hw->back;
4868 pcix_set_mmrbc(adapter->pdev, mmrbc);
4869}
4870
4871void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4872{
4873 outl(value, port);
4874}
4875
4876static bool e1000_vlan_used(struct e1000_adapter *adapter)
4877{
4878 u16 vid;
4879
4880 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4881 return true;
4882 return false;
4883}
4884
4885static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4886 netdev_features_t features)
4887{
4888 struct e1000_hw *hw = &adapter->hw;
4889 u32 ctrl;
4890
4891 ctrl = er32(CTRL);
4892 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4893 /* enable VLAN tag insert/strip */
4894 ctrl |= E1000_CTRL_VME;
4895 } else {
4896 /* disable VLAN tag insert/strip */
4897 ctrl &= ~E1000_CTRL_VME;
4898 }
4899 ew32(CTRL, ctrl);
}

4901static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4902 bool filter_on)
4903{
4904 struct e1000_hw *hw = &adapter->hw;
4905 u32 rctl;
4906
4907 if (!test_bit(__E1000_DOWN, &adapter->flags))
4908 e1000_irq_disable(adapter);
4909
4910 __e1000_vlan_mode(adapter, adapter->netdev->features);
4911 if (filter_on) {
4912 /* enable VLAN receive filtering */
4913 rctl = er32(RCTL);
4914 rctl &= ~E1000_RCTL_CFIEN;
4915 if (!(adapter->netdev->flags & IFF_PROMISC))
4916 rctl |= E1000_RCTL_VFE;
4917 ew32(RCTL, rctl);
4918 e1000_update_mng_vlan(adapter);
4919 } else {
4920 /* disable VLAN receive filtering */
4921 rctl = er32(RCTL);
4922 rctl &= ~E1000_RCTL_VFE;
4923 ew32(RCTL, rctl);
4924 }
4925
4926 if (!test_bit(__E1000_DOWN, &adapter->flags))
4927 e1000_irq_enable(adapter);
4928}
4929
4930static void e1000_vlan_mode(struct net_device *netdev,
4931 netdev_features_t features)
4932{
4933 struct e1000_adapter *adapter = netdev_priv(netdev);
4934
4935 if (!test_bit(__E1000_DOWN, &adapter->flags))
4936 e1000_irq_disable(adapter);
4937
4938 __e1000_vlan_mode(adapter, features);
4939
4940 if (!test_bit(__E1000_DOWN, &adapter->flags))
4941 e1000_irq_enable(adapter);
4942}
4943
4944static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4945 __be16 proto, u16 vid)
4946{
4947 struct e1000_adapter *adapter = netdev_priv(netdev);
4948 struct e1000_hw *hw = &adapter->hw;
4949 u32 vfta, index;
4950
4951 if ((hw->mng_cookie.status &
4952 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4953 (vid == adapter->mng_vlan_id))
4954 return 0;
4955
4956 if (!e1000_vlan_used(adapter))
4957 e1000_vlan_filter_on_off(adapter, true);
4958
4959 /* add VID to filter table */
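/* The 4096 possible VIDs map onto 128 32-bit VFTA registers: bits 11:5
 * of the VID select the register index, bits 4:0 the bit within it.
 */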
4960 index = (vid >> 5) & 0x7F;
4961 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4962 vfta |= (1 << (vid & 0x1F));
4963 e1000_write_vfta(hw, index, vfta);
4964
4965 set_bit(vid, adapter->active_vlans);
4966
4967 return 0;
4968}
4969
4970static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4971 __be16 proto, u16 vid)
4972{
4973 struct e1000_adapter *adapter = netdev_priv(netdev);
4974 struct e1000_hw *hw = &adapter->hw;
4975 u32 vfta, index;
4976
4977 if (!test_bit(__E1000_DOWN, &adapter->flags))
4978 e1000_irq_disable(adapter);
4979 if (!test_bit(__E1000_DOWN, &adapter->flags))
4980 e1000_irq_enable(adapter);
4981
4982 /* remove VID from filter table */
4983 index = (vid >> 5) & 0x7F;
4984 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4985 vfta &= ~(1 << (vid & 0x1F));
4986 e1000_write_vfta(hw, index, vfta);
4987
4988 clear_bit(vid, adapter->active_vlans);
4989
4990 if (!e1000_vlan_used(adapter))
4991 e1000_vlan_filter_on_off(adapter, false);
4992
4993 return 0;
4994}
4995
4996static void e1000_restore_vlan(struct e1000_adapter *adapter)
4997{
4998 u16 vid;
4999
5000 if (!e1000_vlan_used(adapter))
5001 return;
5002
5003 e1000_vlan_filter_on_off(adapter, true);
5004 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5005 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5006}
5007
5008int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5009{
5010 struct e1000_hw *hw = &adapter->hw;
5011
5012 hw->autoneg = 0;
5013
5014 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5015 * for the switch() below to work
5016 */
5017 if ((spd & 1) || (dplx & ~1))
5018 goto err_inval;
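/* With ethtool's DUPLEX_HALF == 0 and DUPLEX_FULL == 1, the spd + dplx
 * sums in the switch below are unambiguous: 10, 11, 100, 101 and 1001.
 */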
5019
/* Fiber NICs only allow 1000 Mbps full duplex */
5021 if ((hw->media_type == e1000_media_type_fiber) &&
5022 spd != SPEED_1000 &&
5023 dplx != DUPLEX_FULL)
5024 goto err_inval;
5025
5026 switch (spd + dplx) {
5027 case SPEED_10 + DUPLEX_HALF:
5028 hw->forced_speed_duplex = e1000_10_half;
5029 break;
5030 case SPEED_10 + DUPLEX_FULL:
5031 hw->forced_speed_duplex = e1000_10_full;
5032 break;
5033 case SPEED_100 + DUPLEX_HALF:
5034 hw->forced_speed_duplex = e1000_100_half;
5035 break;
5036 case SPEED_100 + DUPLEX_FULL:
5037 hw->forced_speed_duplex = e1000_100_full;
5038 break;
5039 case SPEED_1000 + DUPLEX_FULL:
5040 hw->autoneg = 1;
5041 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5042 break;
5043 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5044 default:
5045 goto err_inval;
5046 }
5047
5048 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5049 hw->mdix = AUTO_ALL_MODES;
5050
5051 return 0;
5052
5053err_inval:
5054 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5055 return -EINVAL;
5056}
5057
5058static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5059{
5060 struct net_device *netdev = pci_get_drvdata(pdev);
5061 struct e1000_adapter *adapter = netdev_priv(netdev);
5062 struct e1000_hw *hw = &adapter->hw;
5063 u32 ctrl, ctrl_ext, rctl, status;
5064 u32 wufc = adapter->wol;
5065
5066 netif_device_detach(netdev);
5067
5068 if (netif_running(netdev)) {
5069 int count = E1000_CHECK_RESET_COUNT;
5070
5071 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5072 usleep_range(10000, 20000);
5073
5074 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5075 e1000_down(adapter);
5076 }
5077
5078 status = er32(STATUS);
5079 if (status & E1000_STATUS_LU)
5080 wufc &= ~E1000_WUFC_LNKC;
5081
5082 if (wufc) {
5083 e1000_setup_rctl(adapter);
5084 e1000_set_rx_mode(netdev);
5085
5086 rctl = er32(RCTL);
5087
5088 /* turn on all-multi mode if wake on multicast is enabled */
5089 if (wufc & E1000_WUFC_MC)
5090 rctl |= E1000_RCTL_MPE;
5091
5092 /* enable receives in the hardware */
5093 ew32(RCTL, rctl | E1000_RCTL_EN);
5094
5095 if (hw->mac_type >= e1000_82540) {
5096 ctrl = er32(CTRL);
5097 /* advertise wake from D3Cold */
5098 #define E1000_CTRL_ADVD3WUC 0x00100000
5099 /* phy power management enable */
5100 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5101 ctrl |= E1000_CTRL_ADVD3WUC |
5102 E1000_CTRL_EN_PHY_PWR_MGMT;
5103 ew32(CTRL, ctrl);
5104 }
5105
5106 if (hw->media_type == e1000_media_type_fiber ||
5107 hw->media_type == e1000_media_type_internal_serdes) {
5108 /* keep the laser running in D3 */
5109 ctrl_ext = er32(CTRL_EXT);
5110 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5111 ew32(CTRL_EXT, ctrl_ext);
5112 }
5113
5114 ew32(WUC, E1000_WUC_PME_EN);
5115 ew32(WUFC, wufc);
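/* WUC arms PME assertion while WUFC selects which wake events (link
 * change, magic packet, multicast, ...) are allowed to trigger it.
 */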
5116 } else {
5117 ew32(WUC, 0);
5118 ew32(WUFC, 0);
5119 }
5120
5121 e1000_release_manageability(adapter);
5122
5123 *enable_wake = !!wufc;
5124
5125 /* make sure adapter isn't asleep if manageability is enabled */
5126 if (adapter->en_mng_pt)
5127 *enable_wake = true;
5128
5129 if (netif_running(netdev))
5130 e1000_free_irq(adapter);
5131
5132 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5133 pci_disable_device(pdev);
5134
5135 return 0;
5136}
5137
5138static int __maybe_unused e1000_suspend(struct device *dev)
5139{
5140 int retval;
5141 struct pci_dev *pdev = to_pci_dev(dev);
5142 bool wake;
5143
5144 retval = __e1000_shutdown(pdev, &wake);
5145 device_set_wakeup_enable(dev, wake);
5146
5147 return retval;
5148}
5149
5150static int __maybe_unused e1000_resume(struct device *dev)
5151{
5152 struct pci_dev *pdev = to_pci_dev(dev);
5153 struct net_device *netdev = pci_get_drvdata(pdev);
5154 struct e1000_adapter *adapter = netdev_priv(netdev);
5155 struct e1000_hw *hw = &adapter->hw;
5156 u32 err;
5157
5158 if (adapter->need_ioport)
5159 err = pci_enable_device(pdev);
5160 else
5161 err = pci_enable_device_mem(pdev);
5162 if (err) {
5163 pr_err("Cannot enable PCI device from suspend\n");
5164 return err;
5165 }
5166
5167 /* flush memory to make sure state is correct */
5168 smp_mb__before_atomic();
5169 clear_bit(__E1000_DISABLED, &adapter->flags);
5170 pci_set_master(pdev);
5171
5172 pci_enable_wake(pdev, PCI_D3hot, 0);
5173 pci_enable_wake(pdev, PCI_D3cold, 0);
5174
5175 if (netif_running(netdev)) {
5176 err = e1000_request_irq(adapter);
5177 if (err)
5178 return err;
5179 }
5180
5181 e1000_power_up_phy(adapter);
5182 e1000_reset(adapter);
5183 ew32(WUS, ~0);
5184
5185 e1000_init_manageability(adapter);
5186
5187 if (netif_running(netdev))
5188 e1000_up(adapter);
5189
5190 netif_device_attach(netdev);
5191
5192 return 0;
5193}
5194
5195static void e1000_shutdown(struct pci_dev *pdev)
5196{
5197 bool wake;
5198
5199 __e1000_shutdown(pdev, &wake);
5200
5201 if (system_state == SYSTEM_POWER_OFF) {
5202 pci_wake_from_d3(pdev, wake);
5203 pci_set_power_state(pdev, PCI_D3hot);
5204 }
5205}
5206
5207#ifdef CONFIG_NET_POLL_CONTROLLER
5208/* Polling 'interrupt' - used by things like netconsole to send skbs
5209 * without having to re-enable interrupts. It's not called while
5210 * the interrupt routine is executing.
5211 */
5212static void e1000_netpoll(struct net_device *netdev)
5213{
5214 struct e1000_adapter *adapter = netdev_priv(netdev);
5215
5216 if (disable_hardirq(adapter->pdev->irq))
5217 e1000_intr(adapter->pdev->irq, netdev);
5218 enable_irq(adapter->pdev->irq);
5219}
5220#endif
5221
5222/**
5223 * e1000_io_error_detected - called when PCI error is detected
5224 * @pdev: Pointer to PCI device
5225 * @state: The current pci connection state
5226 *
5227 * This function is called after a PCI bus error affecting
5228 * this device has been detected.
5229 */
5230static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5231 pci_channel_state_t state)
5232{
5233 struct net_device *netdev = pci_get_drvdata(pdev);
5234 struct e1000_adapter *adapter = netdev_priv(netdev);
5235
5236 netif_device_detach(netdev);
5237
5238 if (state == pci_channel_io_perm_failure)
5239 return PCI_ERS_RESULT_DISCONNECT;
5240
5241 if (netif_running(netdev))
5242 e1000_down(adapter);
5243
5244 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5245 pci_disable_device(pdev);
5246
5247 /* Request a slot reset. */
5248 return PCI_ERS_RESULT_NEED_RESET;
5249}
5250
5251/**
5252 * e1000_io_slot_reset - called after the pci bus has been reset.
5253 * @pdev: Pointer to PCI device
5254 *
5255 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
5257 */
5258static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5259{
5260 struct net_device *netdev = pci_get_drvdata(pdev);
5261 struct e1000_adapter *adapter = netdev_priv(netdev);
5262 struct e1000_hw *hw = &adapter->hw;
5263 int err;
5264
5265 if (adapter->need_ioport)
5266 err = pci_enable_device(pdev);
5267 else
5268 err = pci_enable_device_mem(pdev);
5269 if (err) {
5270 pr_err("Cannot re-enable PCI device after reset.\n");
5271 return PCI_ERS_RESULT_DISCONNECT;
5272 }
5273
5274 /* flush memory to make sure state is correct */
5275 smp_mb__before_atomic();
5276 clear_bit(__E1000_DISABLED, &adapter->flags);
5277 pci_set_master(pdev);
5278
5279 pci_enable_wake(pdev, PCI_D3hot, 0);
5280 pci_enable_wake(pdev, PCI_D3cold, 0);
5281
5282 e1000_reset(adapter);
5283 ew32(WUS, ~0);
5284
5285 return PCI_ERS_RESULT_RECOVERED;
5286}
5287
5288/**
5289 * e1000_io_resume - called when traffic can start flowing again.
5290 * @pdev: Pointer to PCI device
5291 *
5292 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
5295 */
5296static void e1000_io_resume(struct pci_dev *pdev)
5297{
5298 struct net_device *netdev = pci_get_drvdata(pdev);
5299 struct e1000_adapter *adapter = netdev_priv(netdev);
5300
5301 e1000_init_manageability(adapter);
5302
5303 if (netif_running(netdev)) {
5304 if (e1000_up(adapter)) {
5305 pr_info("can't bring device back up after reset\n");
5306 return;
5307 }
5308 }
5309
5310 netif_device_attach(netdev);
5311}
5312
5313/* e1000_main.c */
1/*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
30#include <net/ip6_checksum.h>
31#include <linux/io.h>
32#include <linux/prefetch.h>
33#include <linux/bitops.h>
34#include <linux/if_vlan.h>
35
36char e1000_driver_name[] = "e1000";
37static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38#define DRV_VERSION "7.3.21-k8-NAPI"
39const char e1000_driver_version[] = DRV_VERSION;
40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42/* e1000_pci_tbl - PCI Device ID Table
43 *
44 * Last entry must be all 0s
45 *
46 * Macro expands to...
47 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48 */
49static const struct pci_device_id e1000_pci_tbl[] = {
50 INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 INTEL_E1000_ETHERNET_DEVICE(0x1019),
69 INTEL_E1000_ETHERNET_DEVICE(0x101A),
70 INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 INTEL_E1000_ETHERNET_DEVICE(0x108A),
84 INTEL_E1000_ETHERNET_DEVICE(0x1099),
85 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87 /* required last entry */
88 {0,}
89};
90
91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93int e1000_up(struct e1000_adapter *adapter);
94void e1000_down(struct e1000_adapter *adapter);
95void e1000_reinit_locked(struct e1000_adapter *adapter);
96void e1000_reset(struct e1000_adapter *adapter);
97int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102 struct e1000_tx_ring *txdr);
103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104 struct e1000_rx_ring *rxdr);
105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106 struct e1000_tx_ring *tx_ring);
107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108 struct e1000_rx_ring *rx_ring);
109void e1000_update_stats(struct e1000_adapter *adapter);
110
111static int e1000_init_module(void);
112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void e1000_remove(struct pci_dev *pdev);
115static int e1000_alloc_queues(struct e1000_adapter *adapter);
116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev);
118static int e1000_close(struct net_device *netdev);
119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter);
122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125 struct e1000_tx_ring *tx_ring);
126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring);
128static void e1000_set_rx_mode(struct net_device *netdev);
129static void e1000_update_phy_info_task(struct work_struct *work);
130static void e1000_watchdog(struct work_struct *work);
131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
134static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136static int e1000_set_mac(struct net_device *netdev, void *p);
137static irqreturn_t e1000_intr(int irq, void *data);
138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
140static int e1000_clean(struct napi_struct *napi, int budget);
141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
147static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
148 struct e1000_rx_ring *rx_ring,
149 int cleaned_count)
150{
151}
152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
153 struct e1000_rx_ring *rx_ring,
154 int cleaned_count);
155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156 struct e1000_rx_ring *rx_ring,
157 int cleaned_count);
158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
160 int cmd);
161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163static void e1000_tx_timeout(struct net_device *dev);
164static void e1000_reset_task(struct work_struct *work);
165static void e1000_smartspeed(struct e1000_adapter *adapter);
166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
167 struct sk_buff *skb);
168
169static bool e1000_vlan_used(struct e1000_adapter *adapter);
170static void e1000_vlan_mode(struct net_device *netdev,
171 netdev_features_t features);
172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
173 bool filter_on);
174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
175 __be16 proto, u16 vid);
176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
177 __be16 proto, u16 vid);
178static void e1000_restore_vlan(struct e1000_adapter *adapter);
179
180#ifdef CONFIG_PM
181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
182static int e1000_resume(struct pci_dev *pdev);
183#endif
184static void e1000_shutdown(struct pci_dev *pdev);
185
186#ifdef CONFIG_NET_POLL_CONTROLLER
187/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
189#endif
190
191#define COPYBREAK_DEFAULT 256
192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
193module_param(copybreak, uint, 0644);
194MODULE_PARM_DESC(copybreak,
195 "Maximum size of packet that is copied to a new buffer on receive");
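
/* A minimal sketch (illustrative only, not the driver's actual receive
 * path) of how a copybreak heuristic is typically applied: packets no
 * larger than copybreak are copied into a small freshly-allocated skb so
 * the original DMA buffer can be recycled immediately, while larger
 * packets are passed up the stack in place:
 *
 *	if (length <= copybreak) {
 *		struct sk_buff *copy = napi_alloc_skb(&adapter->napi, length);
 *
 *		if (copy) {
 *			skb_put_data(copy, data, length);
 *			skb = copy;	// original buffer is reused
 *		}
 *	}
 *
 * The parameter can also be tuned at load time, e.g.:
 *	modprobe e1000 copybreak=0
 */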
196
197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
198 pci_channel_state_t state);
199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
200static void e1000_io_resume(struct pci_dev *pdev);
201
202static const struct pci_error_handlers e1000_err_handler = {
203 .error_detected = e1000_io_error_detected,
204 .slot_reset = e1000_io_slot_reset,
205 .resume = e1000_io_resume,
206};
207
208static struct pci_driver e1000_driver = {
209 .name = e1000_driver_name,
210 .id_table = e1000_pci_tbl,
211 .probe = e1000_probe,
212 .remove = e1000_remove,
213#ifdef CONFIG_PM
214 /* Power Management Hooks */
215 .suspend = e1000_suspend,
216 .resume = e1000_resume,
217#endif
218 .shutdown = e1000_shutdown,
219 .err_handler = &e1000_err_handler
220};
221
222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
224MODULE_LICENSE("GPL");
225MODULE_VERSION(DRV_VERSION);
226
227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
228static int debug = -1;
229module_param(debug, int, 0);
230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
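
/* Note: debug is a level, not a bitmask; netif_msg_init() maps level N to
 * the N lowest NETIF_MSG_* bits, and the default of -1 selects
 * DEFAULT_MSG_ENABLE. For example:
 *	modprobe e1000 debug=3
 * enables NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK (0x7).
 */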
231
/**
 * e1000_get_hw_dev - return the net_device backing a hw struct
 * @hw: pointer to the HW structure
 *
 * Used by the hardware layer to print debugging information.
 **/
237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
238{
239 struct e1000_adapter *adapter = hw->back;
240 return adapter->netdev;
241}
242
243/**
244 * e1000_init_module - Driver Registration Routine
245 *
246 * e1000_init_module is the first routine called when the driver is
247 * loaded. All it does is register with the PCI subsystem.
248 **/
249static int __init e1000_init_module(void)
250{
	int ret;

	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
253
254 pr_info("%s\n", e1000_copyright);
255
256 ret = pci_register_driver(&e1000_driver);
257 if (copybreak != COPYBREAK_DEFAULT) {
258 if (copybreak == 0)
259 pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
263 }
264 return ret;
265}
266
267module_init(e1000_init_module);
268
269/**
270 * e1000_exit_module - Driver Exit Cleanup Routine
271 *
272 * e1000_exit_module is called just before the driver is removed
273 * from memory.
274 **/
275static void __exit e1000_exit_module(void)
276{
277 pci_unregister_driver(&e1000_driver);
278}
279
280module_exit(e1000_exit_module);
281
282static int e1000_request_irq(struct e1000_adapter *adapter)
283{
284 struct net_device *netdev = adapter->netdev;
285 irq_handler_t handler = e1000_intr;
286 int irq_flags = IRQF_SHARED;
287 int err;
288
289 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
290 netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
294
295 return err;
296}
297
298static void e1000_free_irq(struct e1000_adapter *adapter)
299{
300 struct net_device *netdev = adapter->netdev;
301
302 free_irq(adapter->pdev->irq, netdev);
303}
304
305/**
306 * e1000_irq_disable - Mask off interrupt generation on the NIC
307 * @adapter: board private structure
308 **/
309static void e1000_irq_disable(struct e1000_adapter *adapter)
310{
311 struct e1000_hw *hw = &adapter->hw;
312
313 ew32(IMC, ~0);
314 E1000_WRITE_FLUSH();
315 synchronize_irq(adapter->pdev->irq);
316}
317
318/**
319 * e1000_irq_enable - Enable default interrupt generation settings
320 * @adapter: board private structure
321 **/
322static void e1000_irq_enable(struct e1000_adapter *adapter)
323{
324 struct e1000_hw *hw = &adapter->hw;
325
326 ew32(IMS, IMS_ENABLE_MASK);
327 E1000_WRITE_FLUSH();
328}
329
330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331{
332 struct e1000_hw *hw = &adapter->hw;
333 struct net_device *netdev = adapter->netdev;
334 u16 vid = hw->mng_cookie.vlan_id;
335 u16 old_vid = adapter->mng_vlan_id;
336
337 if (!e1000_vlan_used(adapter))
338 return;
339
340 if (!test_bit(vid, adapter->active_vlans)) {
341 if (hw->mng_cookie.status &
342 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
344 adapter->mng_vlan_id = vid;
345 } else {
346 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347 }
348 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349 (vid != old_vid) &&
350 !test_bit(old_vid, adapter->active_vlans))
351 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
352 old_vid);
353 } else {
354 adapter->mng_vlan_id = vid;
355 }
356}
357
358static void e1000_init_manageability(struct e1000_adapter *adapter)
359{
360 struct e1000_hw *hw = &adapter->hw;
361
362 if (adapter->en_mng_pt) {
363 u32 manc = er32(MANC);
364
365 /* disable hardware interception of ARP */
366 manc &= ~(E1000_MANC_ARP_EN);
367
368 ew32(MANC, manc);
369 }
370}
371
372static void e1000_release_manageability(struct e1000_adapter *adapter)
373{
374 struct e1000_hw *hw = &adapter->hw;
375
376 if (adapter->en_mng_pt) {
377 u32 manc = er32(MANC);
378
379 /* re-enable hardware interception of ARP */
380 manc |= E1000_MANC_ARP_EN;
381
382 ew32(MANC, manc);
383 }
384}
385
386/**
387 * e1000_configure - configure the hardware for RX and TX
 * @adapter: board private structure
389 **/
390static void e1000_configure(struct e1000_adapter *adapter)
391{
392 struct net_device *netdev = adapter->netdev;
393 int i;
394
395 e1000_set_rx_mode(netdev);
396
397 e1000_restore_vlan(adapter);
398 e1000_init_manageability(adapter);
399
400 e1000_configure_tx(adapter);
401 e1000_setup_rctl(adapter);
402 e1000_configure_rx(adapter);
403 /* call E1000_DESC_UNUSED which always leaves
404 * at least 1 descriptor unused to make sure
405 * next_to_use != next_to_clean
406 */
407 for (i = 0; i < adapter->num_rx_queues; i++) {
408 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
409 adapter->alloc_rx_buf(adapter, ring,
410 E1000_DESC_UNUSED(ring));
411 }
412}
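
/* For reference, E1000_DESC_UNUSED() (defined in e1000.h) is the usual
 * ring free-count computation; a simplified sketch, ignoring the memory
 * barriers the real macro uses:
 *
 *	unused = ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 *
 * with ntu = ring->next_to_use and ntc = ring->next_to_clean. The "- 1"
 * is what keeps one descriptor permanently unused, as noted above.
 */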
413
414int e1000_up(struct e1000_adapter *adapter)
415{
416 struct e1000_hw *hw = &adapter->hw;
417
418 /* hardware has been reset, we need to reload some things */
419 e1000_configure(adapter);
420
421 clear_bit(__E1000_DOWN, &adapter->flags);
422
423 napi_enable(&adapter->napi);
424
425 e1000_irq_enable(adapter);
426
427 netif_wake_queue(adapter->netdev);
428
429 /* fire a link change interrupt to start the watchdog */
430 ew32(ICS, E1000_ICS_LSC);
431 return 0;
432}
433
434/**
435 * e1000_power_up_phy - restore link in case the phy was powered down
436 * @adapter: address of board private structure
437 *
438 * The phy may be powered down to save power and turn off link when the
439 * driver is unloaded and wake on lan is not enabled (among others)
440 * *** this routine MUST be followed by a call to e1000_reset ***
441 **/
442void e1000_power_up_phy(struct e1000_adapter *adapter)
443{
444 struct e1000_hw *hw = &adapter->hw;
445 u16 mii_reg = 0;
446
447 /* Just clear the power down bit to wake the phy back up */
448 if (hw->media_type == e1000_media_type_copper) {
449 /* according to the manual, the phy will retain its
450 * settings across a power-down/up cycle
451 */
452 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
453 mii_reg &= ~MII_CR_POWER_DOWN;
454 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
455 }
456}
457
458static void e1000_power_down_phy(struct e1000_adapter *adapter)
459{
460 struct e1000_hw *hw = &adapter->hw;
461
	/* Power down the PHY so no link is implied when the interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
468 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469 hw->media_type == e1000_media_type_copper) {
470 u16 mii_reg = 0;
471
472 switch (hw->mac_type) {
473 case e1000_82540:
474 case e1000_82545:
475 case e1000_82545_rev_3:
476 case e1000_82546:
477 case e1000_ce4100:
478 case e1000_82546_rev_3:
479 case e1000_82541:
480 case e1000_82541_rev_2:
481 case e1000_82547:
482 case e1000_82547_rev_2:
483 if (er32(MANC) & E1000_MANC_SMBUS_EN)
484 goto out;
485 break;
486 default:
487 goto out;
488 }
489 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
490 mii_reg |= MII_CR_POWER_DOWN;
491 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
492 msleep(1);
493 }
494out:
495 return;
496}
497
498static void e1000_down_and_stop(struct e1000_adapter *adapter)
499{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Since the watchdog task can reschedule other tasks, we must cancel
	 * it first; otherwise we can run into a situation where a work item
	 * is still running after the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
512
513 /* Only kill reset task if adapter is not resetting */
514 if (!test_bit(__E1000_RESETTING, &adapter->flags))
515 cancel_work_sync(&adapter->reset_task);
516}
517
518void e1000_down(struct e1000_adapter *adapter)
519{
520 struct e1000_hw *hw = &adapter->hw;
521 struct net_device *netdev = adapter->netdev;
522 u32 rctl, tctl;
523
524 netif_carrier_off(netdev);
525
526 /* disable receives in the hardware */
527 rctl = er32(RCTL);
528 ew32(RCTL, rctl & ~E1000_RCTL_EN);
529 /* flush and sleep below */
530
531 netif_tx_disable(netdev);
532
533 /* disable transmits in the hardware */
534 tctl = er32(TCTL);
535 tctl &= ~E1000_TCTL_EN;
536 ew32(TCTL, tctl);
537 /* flush both disables and wait for them to finish */
538 E1000_WRITE_FLUSH();
539 msleep(10);
540
541 napi_disable(&adapter->napi);
542
543 e1000_irq_disable(adapter);
544
545 /* Setting DOWN must be after irq_disable to prevent
546 * a screaming interrupt. Setting DOWN also prevents
547 * tasks from rescheduling.
548 */
549 e1000_down_and_stop(adapter);
550
551 adapter->link_speed = 0;
552 adapter->link_duplex = 0;
553
554 e1000_reset(adapter);
555 e1000_clean_all_tx_rings(adapter);
556 e1000_clean_all_rx_rings(adapter);
557}
558
559void e1000_reinit_locked(struct e1000_adapter *adapter)
560{
561 WARN_ON(in_interrupt());
562 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
563 msleep(1);
564 e1000_down(adapter);
565 e1000_up(adapter);
566 clear_bit(__E1000_RESETTING, &adapter->flags);
567}
568
569void e1000_reset(struct e1000_adapter *adapter)
570{
571 struct e1000_hw *hw = &adapter->hw;
572 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
573 bool legacy_pba_adjust = false;
574 u16 hwm;
575
	/* Repartition the PBA for an MTU greater than 9k.
	 * CTRL.RST must be asserted for the change to take effect.
	 */
579
580 switch (hw->mac_type) {
581 case e1000_82542_rev2_0:
582 case e1000_82542_rev2_1:
583 case e1000_82543:
584 case e1000_82544:
585 case e1000_82540:
586 case e1000_82541:
587 case e1000_82541_rev_2:
588 legacy_pba_adjust = true;
589 pba = E1000_PBA_48K;
590 break;
591 case e1000_82545:
592 case e1000_82545_rev_3:
593 case e1000_82546:
594 case e1000_ce4100:
595 case e1000_82546_rev_3:
596 pba = E1000_PBA_48K;
597 break;
598 case e1000_82547:
599 case e1000_82547_rev_2:
600 legacy_pba_adjust = true;
601 pba = E1000_PBA_30K;
602 break;
603 case e1000_undefined:
604 case e1000_num_macs:
605 break;
606 }
607
608 if (legacy_pba_adjust) {
609 if (hw->max_frame_size > E1000_RXBUFFER_8192)
610 pba -= 8; /* allocate more FIFO for Tx */
611
612 if (hw->mac_type == e1000_82547) {
613 adapter->tx_fifo_head = 0;
614 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
615 adapter->tx_fifo_size =
616 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
617 atomic_set(&adapter->tx_fifo_stall, 0);
618 }
619 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
620 /* adjust PBA for jumbo frames */
621 ew32(PBA, pba);
622
623 /* To maintain wire speed transmits, the Tx FIFO should be
624 * large enough to accommodate two full transmit packets,
625 * rounded up to the next 1KB and expressed in KB. Likewise,
626 * the Rx FIFO should be large enough to accommodate at least
627 * one full receive packet and is similarly rounded up and
628 * expressed in KB.
629 */
630 pba = er32(PBA);
631 /* upper 16 bits has Tx packet buffer allocation size in KB */
632 tx_space = pba >> 16;
633 /* lower 16 bits has Rx packet buffer allocation size in KB */
634 pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet; don't include the Ethernet FCS because the
		 * hardware appends it
		 */
638 min_tx_space = (hw->max_frame_size +
639 sizeof(struct e1000_tx_desc) -
640 ETH_FCS_LEN) * 2;
641 min_tx_space = ALIGN(min_tx_space, 1024);
642 min_tx_space >>= 10;
643 /* software strips receive CRC, so leave room for it */
644 min_rx_space = hw->max_frame_size;
645 min_rx_space = ALIGN(min_rx_space, 1024);
646 min_rx_space >>= 10;
647
648 /* If current Tx allocation is less than the min Tx FIFO size,
649 * and the min Tx FIFO size is less than the current Rx FIFO
650 * allocation, take space away from current Rx allocation
651 */
652 if (tx_space < min_tx_space &&
653 ((min_tx_space - tx_space) < pba)) {
654 pba = pba - (min_tx_space - tx_space);
655
656 /* PCI/PCIx hardware has PBA alignment constraints */
657 switch (hw->mac_type) {
658 case e1000_82545 ... e1000_82546_rev_3:
659 pba &= ~(E1000_PBA_8K - 1);
660 break;
661 default:
662 break;
663 }
664
665 /* if short on Rx space, Rx wins and must trump Tx
666 * adjustment or use Early Receive if available
667 */
668 if (pba < min_rx_space)
669 pba = min_rx_space;
670 }
671 }
672
673 ew32(PBA, pba);
674
675 /* flow control settings:
676 * The high water mark must be low enough to fit one full frame
677 * (or the size used for early receive) above it in the Rx FIFO.
678 * Set it to the lower of:
679 * - 90% of the Rx FIFO size, and
680 * - the full Rx FIFO size minus the early receive size (for parts
681 * with ERT support assuming ERT set to E1000_ERT_2048), or
682 * - the full Rx FIFO size minus one full frame
683 */
684 hwm = min(((pba << 10) * 9 / 10),
685 ((pba << 10) - hw->max_frame_size));
686
687 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
688 hw->fc_low_water = hw->fc_high_water - 8;
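
	/* Worked example: with pba = 40 (KB) and a 1522-byte max frame,
	 * hwm = min(40960 * 9 / 10, 40960 - 1522) = min(36864, 39438)
	 *     = 36864 (0x9000), already 8-byte aligned, so
	 * fc_high_water = 36864 and fc_low_water = 36856.
	 */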
689 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
690 hw->fc_send_xon = 1;
691 hw->fc = hw->original_fc;
692
693 /* Allow time for pending master requests to run */
694 e1000_reset_hw(hw);
695 if (hw->mac_type >= e1000_82544)
696 ew32(WUC, 0);
697
698 if (e1000_init_hw(hw))
699 e_dev_err("Hardware Error\n");
700 e1000_update_mng_vlan(adapter);
701
703 if (hw->mac_type >= e1000_82544 &&
704 hw->autoneg == 1 &&
705 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
706 u32 ctrl = er32(CTRL);
707 /* clear phy power management bit if we are in gig only mode,
708 * which if enabled will attempt negotiation to 100Mb, which
709 * can cause a loss of link at power off or driver unload
710 */
711 ctrl &= ~E1000_CTRL_SWDPIN3;
712 ew32(CTRL, ctrl);
713 }
714
715 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
716 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
717
718 e1000_reset_adaptive(hw);
719 e1000_phy_get_info(hw, &adapter->phy_info);
720
721 e1000_release_manageability(adapter);
722}
723
724/* Dump the eeprom for users having checksum issues */
725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
726{
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
738 if (!data)
739 return;
740
741 ops->get_eeprom(netdev, &eeprom, data);
742
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
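	/* NVM convention: the 16-bit words 0x00..EEPROM_CHECKSUM_REG must sum
	 * to EEPROM_SUM (0xBABA), so the expected checksum word computed in
	 * csum_new is EEPROM_SUM minus the sum of all preceding words.
	 */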
748
749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
752
753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
768
769 kfree(data);
770}
771
772/**
773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
774 * @pdev: PCI device information struct
775 *
776 * Return true if an adapter needs ioport resources
777 **/
778static int e1000_is_need_ioport(struct pci_dev *pdev)
779{
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806}
807
808static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
810{
811 /* Since there is no support for separate Rx/Tx vlan accel
812 * enable/disable make sure Tx flag is always in same state as Rx.
813 */
814 if (features & NETIF_F_HW_VLAN_CTAG_RX)
815 features |= NETIF_F_HW_VLAN_CTAG_TX;
816 else
817 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
818
819 return features;
820}
821
822static int e1000_set_features(struct net_device *netdev,
823 netdev_features_t features)
824{
825 struct e1000_adapter *adapter = netdev_priv(netdev);
826 netdev_features_t changed = features ^ netdev->features;
827
828 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
829 e1000_vlan_mode(netdev, features);
830
831 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
832 return 0;
833
834 netdev->features = features;
835 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
836
837 if (netif_running(netdev))
838 e1000_reinit_locked(adapter);
839 else
840 e1000_reset(adapter);
841
842 return 0;
843}
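
/* These hooks are exercised from userspace via ethtool; because of the
 * coupling enforced in e1000_fix_features() above, for example on an
 * interface named eth0:
 *	ethtool -K eth0 rxvlan off
 * also clears tx VLAN acceleration on this hardware.
 */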
844
845static const struct net_device_ops e1000_netdev_ops = {
846 .ndo_open = e1000_open,
847 .ndo_stop = e1000_close,
848 .ndo_start_xmit = e1000_xmit_frame,
849 .ndo_get_stats = e1000_get_stats,
850 .ndo_set_rx_mode = e1000_set_rx_mode,
851 .ndo_set_mac_address = e1000_set_mac,
852 .ndo_tx_timeout = e1000_tx_timeout,
853 .ndo_change_mtu = e1000_change_mtu,
854 .ndo_do_ioctl = e1000_ioctl,
855 .ndo_validate_addr = eth_validate_addr,
856 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
857 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
858#ifdef CONFIG_NET_POLL_CONTROLLER
859 .ndo_poll_controller = e1000_netpoll,
860#endif
861 .ndo_fix_features = e1000_fix_features,
862 .ndo_set_features = e1000_set_features,
863};
864
865/**
866 * e1000_init_hw_struct - initialize members of hw struct
867 * @adapter: board private struct
868 * @hw: structure used by e1000_hw.c
869 *
870 * Factors out initialization of the e1000_hw struct to its own function
871 * that can be called very early at init (just after struct allocation).
872 * Fields are initialized based on PCI device information and
873 * OS network device settings (MTU size).
874 * Returns negative error codes if MAC type setup fails.
875 */
876static int e1000_init_hw_struct(struct e1000_adapter *adapter,
877 struct e1000_hw *hw)
878{
879 struct pci_dev *pdev = adapter->pdev;
880
881 /* PCI config space info */
882 hw->vendor_id = pdev->vendor;
883 hw->device_id = pdev->device;
884 hw->subsystem_vendor_id = pdev->subsystem_vendor;
885 hw->subsystem_id = pdev->subsystem_device;
886 hw->revision_id = pdev->revision;
887
888 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
889
890 hw->max_frame_size = adapter->netdev->mtu +
891 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
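	/* e.g. the default 1500-byte MTU yields 1500 + 14 (Ethernet header)
	 * + 4 (FCS) = 1518 bytes
	 */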
892 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
893
894 /* identify the MAC */
895 if (e1000_set_mac_type(hw)) {
896 e_err(probe, "Unknown MAC Type\n");
897 return -EIO;
898 }
899
900 switch (hw->mac_type) {
901 default:
902 break;
903 case e1000_82541:
904 case e1000_82547:
905 case e1000_82541_rev_2:
906 case e1000_82547_rev_2:
907 hw->phy_init_script = 1;
908 break;
909 }
910
911 e1000_set_media_type(hw);
912 e1000_get_bus_info(hw);
913
914 hw->wait_autoneg_complete = false;
915 hw->tbi_compatibility_en = true;
916 hw->adaptive_ifs = true;
917
918 /* Copper options */
919
920 if (hw->media_type == e1000_media_type_copper) {
921 hw->mdix = AUTO_ALL_MODES;
922 hw->disable_polarity_correction = false;
923 hw->master_slave = E1000_MASTER_SLAVE;
924 }
925
926 return 0;
927}
928
929/**
930 * e1000_probe - Device Initialization Routine
931 * @pdev: PCI device information struct
932 * @ent: entry in e1000_pci_tbl
933 *
934 * Returns 0 on success, negative on failure
935 *
936 * e1000_probe initializes an adapter identified by a pci_dev structure.
937 * The OS initialization, configuring of the adapter private structure,
938 * and a hardware reset occur.
939 **/
940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
941{
942 struct net_device *netdev;
943 struct e1000_adapter *adapter;
944 struct e1000_hw *hw;
945
946 static int cards_found;
947 static int global_quad_port_a; /* global ksp3 port a indication */
948 int i, err, pci_using_dac;
949 u16 eeprom_data = 0;
950 u16 tmp = 0;
951 u16 eeprom_apme_mask = E1000_EEPROM_APME;
952 int bars, need_ioport;
953
954 /* do not allocate ioport bars when not needed */
955 need_ioport = e1000_is_need_ioport(pdev);
956 if (need_ioport) {
957 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
958 err = pci_enable_device(pdev);
959 } else {
960 bars = pci_select_bars(pdev, IORESOURCE_MEM);
961 err = pci_enable_device_mem(pdev);
962 }
963 if (err)
964 return err;
965
966 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
967 if (err)
968 goto err_pci_reg;
969
970 pci_set_master(pdev);
971 err = pci_save_state(pdev);
972 if (err)
973 goto err_alloc_etherdev;
974
975 err = -ENOMEM;
976 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
977 if (!netdev)
978 goto err_alloc_etherdev;
979
980 SET_NETDEV_DEV(netdev, &pdev->dev);
981
982 pci_set_drvdata(pdev, netdev);
983 adapter = netdev_priv(netdev);
984 adapter->netdev = netdev;
985 adapter->pdev = pdev;
986 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
987 adapter->bars = bars;
988 adapter->need_ioport = need_ioport;
989
990 hw = &adapter->hw;
991 hw->back = adapter;
992
993 err = -EIO;
994 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
995 if (!hw->hw_addr)
996 goto err_ioremap;
997
998 if (adapter->need_ioport) {
999 for (i = BAR_1; i <= BAR_5; i++) {
1000 if (pci_resource_len(pdev, i) == 0)
1001 continue;
1002 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1003 hw->io_base = pci_resource_start(pdev, i);
1004 break;
1005 }
1006 }
1007 }
1008
1009 /* make ready for any if (hw->...) below */
1010 err = e1000_init_hw_struct(adapter, hw);
1011 if (err)
1012 goto err_sw_init;
1013
	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
	 */
1018 pci_using_dac = 0;
1019 if ((hw->bus_type == e1000_bus_type_pcix) &&
1020 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1021 pci_using_dac = 1;
1022 } else {
1023 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1024 if (err) {
1025 pr_err("No usable DMA config, aborting\n");
1026 goto err_dma;
1027 }
1028 }
1029
1030 netdev->netdev_ops = &e1000_netdev_ops;
1031 e1000_set_ethtool_ops(netdev);
1032 netdev->watchdog_timeo = 5 * HZ;
1033 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1034
1035 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1036
1037 adapter->bd_number = cards_found;
1038
1039 /* setup the private structure */
1040
1041 err = e1000_sw_init(adapter);
1042 if (err)
1043 goto err_sw_init;
1044
1045 err = -EIO;
1046 if (hw->mac_type == e1000_ce4100) {
1047 hw->ce4100_gbe_mdio_base_virt =
1048 ioremap(pci_resource_start(pdev, BAR_1),
1049 pci_resource_len(pdev, BAR_1));
1050
1051 if (!hw->ce4100_gbe_mdio_base_virt)
1052 goto err_mdio_ioremap;
1053 }
1054
1055 if (hw->mac_type >= e1000_82543) {
1056 netdev->hw_features = NETIF_F_SG |
1057 NETIF_F_HW_CSUM |
1058 NETIF_F_HW_VLAN_CTAG_RX;
1059 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1060 NETIF_F_HW_VLAN_CTAG_FILTER;
1061 }
1062
1063 if ((hw->mac_type >= e1000_82544) &&
1064 (hw->mac_type != e1000_82547))
1065 netdev->hw_features |= NETIF_F_TSO;
1066
1067 netdev->priv_flags |= IFF_SUPP_NOFCS;
1068
1069 netdev->features |= netdev->hw_features;
1070 netdev->hw_features |= (NETIF_F_RXCSUM |
1071 NETIF_F_RXALL |
1072 NETIF_F_RXFCS);
1073
1074 if (pci_using_dac) {
1075 netdev->features |= NETIF_F_HIGHDMA;
1076 netdev->vlan_features |= NETIF_F_HIGHDMA;
1077 }
1078
1079 netdev->vlan_features |= (NETIF_F_TSO |
1080 NETIF_F_HW_CSUM |
1081 NETIF_F_SG);
1082
1083 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1084 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1085 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1086 netdev->priv_flags |= IFF_UNICAST_FLT;
1087
1088 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1089
1090 /* initialize eeprom parameters */
1091 if (e1000_init_eeprom_params(hw)) {
1092 e_err(probe, "EEPROM initialization failed\n");
1093 goto err_eeprom;
1094 }
1095
1096 /* before reading the EEPROM, reset the controller to
1097 * put the device in a known good starting state
1098 */
1099
1100 e1000_reset_hw(hw);
1101
1102 /* make sure the EEPROM is good */
1103 if (e1000_validate_eeprom_checksum(hw) < 0) {
1104 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1105 e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip link set address`
		 */
1113 memset(hw->mac_addr, 0, netdev->addr_len);
1114 } else {
1115 /* copy the MAC address out of the EEPROM */
1116 if (e1000_read_mac_addr(hw))
1117 e_err(probe, "EEPROM Read Error\n");
1118 }
1119 /* don't block initialization here due to bad MAC address */
1120 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1121
1122 if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

1126 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1127 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1128 e1000_82547_tx_fifo_stall_task);
1129 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1130 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1131
1132 e1000_check_options(adapter);
1133
1134 /* Initial Wake on LAN setting
1135 * If APM wake is enabled in the EEPROM,
1136 * enable the ACPI Magic Packet filter
1137 */
1138
1139 switch (hw->mac_type) {
1140 case e1000_82542_rev2_0:
1141 case e1000_82542_rev2_1:
1142 case e1000_82543:
1143 break;
1144 case e1000_82544:
1145 e1000_read_eeprom(hw,
1146 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1147 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1148 break;
1149 case e1000_82546:
1150 case e1000_82546_rev_3:
1151 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1152 e1000_read_eeprom(hw,
1153 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1154 break;
1155 }
1156 /* Fall Through */
1157 default:
1158 e1000_read_eeprom(hw,
1159 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1160 break;
1161 }
1162 if (eeprom_data & eeprom_apme_mask)
1163 adapter->eeprom_wol |= E1000_WUFC_MAG;
1164
1165 /* now that we have the eeprom settings, apply the special cases
1166 * where the eeprom may be wrong or the board simply won't support
1167 * wake on lan on a particular port
1168 */
1169 switch (pdev->device) {
1170 case E1000_DEV_ID_82546GB_PCIE:
1171 adapter->eeprom_wol = 0;
1172 break;
1173 case E1000_DEV_ID_82546EB_FIBER:
1174 case E1000_DEV_ID_82546GB_FIBER:
1175 /* Wake events only supported on port A for dual fiber
1176 * regardless of eeprom setting
1177 */
1178 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1179 adapter->eeprom_wol = 0;
1180 break;
1181 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1182 /* if quad port adapter, disable WoL on all but port A */
1183 if (global_quad_port_a != 0)
1184 adapter->eeprom_wol = 0;
1185 else
1186 adapter->quad_port_a = true;
1187 /* Reset for multiple quad port adapters */
1188 if (++global_quad_port_a == 4)
1189 global_quad_port_a = 0;
1190 break;
1191 }
1192
1193 /* initialize the wol settings based on the eeprom settings */
1194 adapter->wol = adapter->eeprom_wol;
1195 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1196
1197 /* Auto detect PHY address */
1198 if (hw->mac_type == e1000_ce4100) {
1199 for (i = 0; i < 32; i++) {
1200 hw->phy_addr = i;
1201 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1202
1203 if (tmp != 0 && tmp != 0xFF)
1204 break;
1205 }
1206
1207 if (i >= 32)
1208 goto err_eeprom;
1209 }
1210
1211 /* reset the hardware with the new settings */
1212 e1000_reset(adapter);
1213
1214 strcpy(netdev->name, "eth%d");
1215 err = register_netdev(netdev);
1216 if (err)
1217 goto err_register;
1218
1219 e1000_vlan_filter_on_off(adapter, false);
1220
1221 /* print bus type/speed/width info */
1222 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1223 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1224 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1225 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1226 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1227 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1228 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1229 netdev->dev_addr);
1230
1231 /* carrier off reporting is important to ethtool even BEFORE open */
1232 netif_carrier_off(netdev);
1233
1234 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1235
1236 cards_found++;
1237 return 0;
1238
1239err_register:
1240err_eeprom:
1241 e1000_phy_hw_reset(hw);
1242
1243 if (hw->flash_address)
1244 iounmap(hw->flash_address);
1245 kfree(adapter->tx_ring);
1246 kfree(adapter->rx_ring);
1247err_dma:
1248err_sw_init:
1249err_mdio_ioremap:
1250 iounmap(hw->ce4100_gbe_mdio_base_virt);
1251 iounmap(hw->hw_addr);
1252err_ioremap:
1253 free_netdev(netdev);
1254err_alloc_etherdev:
1255 pci_release_selected_regions(pdev, bars);
1256err_pci_reg:
1257 pci_disable_device(pdev);
1258 return err;
1259}
1260
1261/**
1262 * e1000_remove - Device Removal Routine
1263 * @pdev: PCI device information struct
1264 *
1265 * e1000_remove is called by the PCI subsystem to alert the driver
1266 * that it should release a PCI device. That could be caused by a
1267 * Hot-Plug event, or because the driver is going to be removed from
1268 * memory.
1269 **/
1270static void e1000_remove(struct pci_dev *pdev)
1271{
1272 struct net_device *netdev = pci_get_drvdata(pdev);
1273 struct e1000_adapter *adapter = netdev_priv(netdev);
1274 struct e1000_hw *hw = &adapter->hw;
1275
1276 e1000_down_and_stop(adapter);
1277 e1000_release_manageability(adapter);
1278
1279 unregister_netdev(netdev);
1280
1281 e1000_phy_hw_reset(hw);
1282
1283 kfree(adapter->tx_ring);
1284 kfree(adapter->rx_ring);
1285
1286 if (hw->mac_type == e1000_ce4100)
1287 iounmap(hw->ce4100_gbe_mdio_base_virt);
1288 iounmap(hw->hw_addr);
1289 if (hw->flash_address)
1290 iounmap(hw->flash_address);
1291 pci_release_selected_regions(pdev, adapter->bars);
1292
1293 free_netdev(netdev);
1294
1295 pci_disable_device(pdev);
1296}
1297
1298/**
1299 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1300 * @adapter: board private structure to initialize
1301 *
1302 * e1000_sw_init initializes the Adapter private data structure.
1303 * e1000_init_hw_struct MUST be called before this function
1304 **/
1305static int e1000_sw_init(struct e1000_adapter *adapter)
1306{
1307 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1308
1309 adapter->num_tx_queues = 1;
1310 adapter->num_rx_queues = 1;
1311
1312 if (e1000_alloc_queues(adapter)) {
1313 e_err(probe, "Unable to allocate memory for queues\n");
1314 return -ENOMEM;
1315 }
1316
1317 /* Explicitly disable IRQ since the NIC can be in any state. */
1318 e1000_irq_disable(adapter);
1319
1320 spin_lock_init(&adapter->stats_lock);
1321
1322 set_bit(__E1000_DOWN, &adapter->flags);
1323
1324 return 0;
1325}
1326
1327/**
1328 * e1000_alloc_queues - Allocate memory for all rings
1329 * @adapter: board private structure to initialize
1330 *
1331 * We allocate one ring per queue at run-time since we don't know the
1332 * number of queues at compile-time.
1333 **/
1334static int e1000_alloc_queues(struct e1000_adapter *adapter)
1335{
1336 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1337 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1338 if (!adapter->tx_ring)
1339 return -ENOMEM;
1340
1341 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1342 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1343 if (!adapter->rx_ring) {
1344 kfree(adapter->tx_ring);
1345 return -ENOMEM;
1346 }
1347
1348 return E1000_SUCCESS;
1349}
1350
1351/**
1352 * e1000_open - Called when a network interface is made active
1353 * @netdev: network interface device structure
1354 *
1355 * Returns 0 on success, negative value on failure
1356 *
1357 * The open entry point is called when a network interface is made
1358 * active by the system (IFF_UP). At this point all resources needed
1359 * for transmit and receive operations are allocated, the interrupt
1360 * handler is registered with the OS, the watchdog task is started,
1361 * and the stack is notified that the interface is ready.
1362 **/
1363static int e1000_open(struct net_device *netdev)
1364{
1365 struct e1000_adapter *adapter = netdev_priv(netdev);
1366 struct e1000_hw *hw = &adapter->hw;
1367 int err;
1368
1369 /* disallow open during test */
1370 if (test_bit(__E1000_TESTING, &adapter->flags))
1371 return -EBUSY;
1372
1373 netif_carrier_off(netdev);
1374
1375 /* allocate transmit descriptors */
1376 err = e1000_setup_all_tx_resources(adapter);
1377 if (err)
1378 goto err_setup_tx;
1379
1380 /* allocate receive descriptors */
1381 err = e1000_setup_all_rx_resources(adapter);
1382 if (err)
1383 goto err_setup_rx;
1384
1385 e1000_power_up_phy(adapter);
1386
1387 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1388 if ((hw->mng_cookie.status &
1389 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1390 e1000_update_mng_vlan(adapter);
1391 }
1392
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call request_irq, so we have to set up our
	 * clean_rx handler before we do so.
	 */
1398 e1000_configure(adapter);
1399
1400 err = e1000_request_irq(adapter);
1401 if (err)
1402 goto err_req_irq;
1403
1404 /* From here on the code is the same as e1000_up() */
1405 clear_bit(__E1000_DOWN, &adapter->flags);
1406
1407 napi_enable(&adapter->napi);
1408
1409 e1000_irq_enable(adapter);
1410
1411 netif_start_queue(netdev);
1412
1413 /* fire a link status change interrupt to start the watchdog */
1414 ew32(ICS, E1000_ICS_LSC);
1415
1416 return E1000_SUCCESS;
1417
1418err_req_irq:
1419 e1000_power_down_phy(adapter);
1420 e1000_free_all_rx_resources(adapter);
1421err_setup_rx:
1422 e1000_free_all_tx_resources(adapter);
1423err_setup_tx:
1424 e1000_reset(adapter);
1425
1426 return err;
1427}
1428
1429/**
1430 * e1000_close - Disables a network interface
1431 * @netdev: network interface device structure
1432 *
1433 * Returns 0, this is not allowed to fail
1434 *
1435 * The close entry point is called when an interface is de-activated
1436 * by the OS. The hardware is still under the drivers control, but
1437 * needs to be disabled. A global MAC reset is issued to stop the
1438 * hardware, and all transmit and receive resources are freed.
1439 **/
1440static int e1000_close(struct net_device *netdev)
1441{
1442 struct e1000_adapter *adapter = netdev_priv(netdev);
1443 struct e1000_hw *hw = &adapter->hw;
1444 int count = E1000_CHECK_RESET_COUNT;
1445
1446 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1447 usleep_range(10000, 20000);
1448
1449 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1450 e1000_down(adapter);
1451 e1000_power_down_phy(adapter);
1452 e1000_free_irq(adapter);
1453
1454 e1000_free_all_tx_resources(adapter);
1455 e1000_free_all_rx_resources(adapter);
1456
1457 /* kill manageability vlan ID if supported, but not if a vlan with
1458 * the same ID is registered on the host OS (let 8021q kill it)
1459 */
1460 if ((hw->mng_cookie.status &
1461 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1462 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1463 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1464 adapter->mng_vlan_id);
1465 }
1466
1467 return 0;
1468}
1469
1470/**
1471 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1472 * @adapter: address of board private structure
1473 * @start: address of beginning of memory
1474 * @len: length of memory
1475 **/
1476static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1477 unsigned long len)
1478{
1479 struct e1000_hw *hw = &adapter->hw;
1480 unsigned long begin = (unsigned long)start;
1481 unsigned long end = begin + len;
1482
1483 /* First rev 82545 and 82546 need to not allow any memory
1484 * write location to cross 64k boundary due to errata 23
1485 */
1486 if (hw->mac_type == e1000_82545 ||
1487 hw->mac_type == e1000_ce4100 ||
1488 hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) == 0;
1490 }
1491
1492 return true;
1493}
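
/* Worked example: begin = 0xF000 with len = 0x2000 gives end - 1 = 0x10FFF;
 * (0xF000 ^ 0x10FFF) >> 16 == 1, i.e. the region straddles a 64 kB
 * boundary, so the check above correctly returns false.
 */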
1494
1495/**
1496 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1497 * @adapter: board private structure
1498 * @txdr: tx descriptor ring (for a specific queue) to setup
1499 *
1500 * Return 0 on success, negative on failure
1501 **/
1502static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1503 struct e1000_tx_ring *txdr)
1504{
1505 struct pci_dev *pdev = adapter->pdev;
1506 int size;
1507
1508 size = sizeof(struct e1000_tx_buffer) * txdr->count;
1509 txdr->buffer_info = vzalloc(size);
1510 if (!txdr->buffer_info)
1511 return -ENOMEM;
1512
1513 /* round up to nearest 4K */
1514
1515 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1516 txdr->size = ALIGN(txdr->size, 4096);
1517
1518 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1519 GFP_KERNEL);
1520 if (!txdr->desc) {
1521setup_tx_desc_die:
1522 vfree(txdr->buffer_info);
1523 return -ENOMEM;
1524 }
1525
1526 /* Fix for errata 23, can't cross 64kB boundary */
1527 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1528 void *olddesc = txdr->desc;
1529 dma_addr_t olddma = txdr->dma;
1530 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1531 txdr->size, txdr->desc);
1532 /* Try again, without freeing the previous */
1533 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1534 &txdr->dma, GFP_KERNEL);
1535 /* Failed allocation, critical failure */
1536 if (!txdr->desc) {
1537 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1538 olddma);
1539 goto setup_tx_desc_die;
1540 }
1541
1542 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1543 /* give up */
1544 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1545 txdr->dma);
1546 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547 olddma);
1548 e_err(probe, "Unable to allocate aligned memory "
1549 "for the transmit descriptor ring\n");
1550 vfree(txdr->buffer_info);
1551 return -ENOMEM;
1552 } else {
1553 /* Free old allocation, new allocation was successful */
1554 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1555 olddma);
1556 }
1557 }
1558 memset(txdr->desc, 0, txdr->size);
1559
1560 txdr->next_to_use = 0;
1561 txdr->next_to_clean = 0;
1562
1563 return 0;
1564}
1565
1566/**
1567 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1568 * (Descriptors) for all queues
1569 * @adapter: board private structure
1570 *
1571 * Return 0 on success, negative on failure
1572 **/
1573int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1574{
1575 int i, err = 0;
1576
1577 for (i = 0; i < adapter->num_tx_queues; i++) {
1578 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1579 if (err) {
1580 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1581 for (i-- ; i >= 0; i--)
1582 e1000_free_tx_resources(adapter,
1583 &adapter->tx_ring[i]);
1584 break;
1585 }
1586 }
1587
1588 return err;
1589}
1590
1591/**
1592 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1593 * @adapter: board private structure
1594 *
1595 * Configure the Tx unit of the MAC after a reset.
1596 **/
1597static void e1000_configure_tx(struct e1000_adapter *adapter)
1598{
1599 u64 tdba;
1600 struct e1000_hw *hw = &adapter->hw;
1601 u32 tdlen, tctl, tipg;
1602 u32 ipgr1, ipgr2;
1603
1604 /* Setup the HW Tx Head and Tail descriptor pointers */
1605
1606 switch (adapter->num_tx_queues) {
1607 case 1:
1608 default:
1609 tdba = adapter->tx_ring[0].dma;
1610 tdlen = adapter->tx_ring[0].count *
1611 sizeof(struct e1000_tx_desc);
1612 ew32(TDLEN, tdlen);
1613 ew32(TDBAH, (tdba >> 32));
1614 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1615 ew32(TDT, 0);
1616 ew32(TDH, 0);
1617 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1618 E1000_TDH : E1000_82542_TDH);
1619 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1620 E1000_TDT : E1000_82542_TDT);
1621 break;
1622 }
1623
1624 /* Set the default values for the Tx Inter Packet Gap timer */
1625 if ((hw->media_type == e1000_media_type_fiber ||
1626 hw->media_type == e1000_media_type_internal_serdes))
1627 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1628 else
1629 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1630
1631 switch (hw->mac_type) {
1632 case e1000_82542_rev2_0:
1633 case e1000_82542_rev2_1:
1634 tipg = DEFAULT_82542_TIPG_IPGT;
1635 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1636 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1637 break;
1638 default:
1639 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1640 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1641 break;
1642 }
1643 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1644 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1645 ew32(TIPG, tipg);
1646
1647 /* Set the Tx Interrupt Delay register */
1648
1649 ew32(TIDV, adapter->tx_int_delay);
1650 if (hw->mac_type >= e1000_82540)
1651 ew32(TADV, adapter->tx_abs_int_delay);
1652
1653 /* Program the Transmit Control Register */
1654
1655 tctl = er32(TCTL);
1656 tctl &= ~E1000_TCTL_CT;
1657 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1658 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1659
1660 e1000_config_collision_dist(hw);
1661
1662 /* Setup Transmit Descriptor Settings for eop descriptor */
1663 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1664
1665 /* only set IDE if we are delaying interrupts using the timers */
1666 if (adapter->tx_int_delay)
1667 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1668
1669 if (hw->mac_type < e1000_82543)
1670 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1671 else
1672 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1673
1674 /* Cache if we're 82544 running in PCI-X because we'll
1675 * need this to apply a workaround later in the send path.
1676 */
1677 if (hw->mac_type == e1000_82544 &&
1678 hw->bus_type == e1000_bus_type_pcix)
1679 adapter->pcix_82544 = true;
1680
	ew32(TCTL, tctl);
}
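
/* TIPG layout on these parts: bits 9:0 hold IPGT, bits 19:10 IPGR1
 * (E1000_TIPG_IPGR1_SHIFT == 10) and bits 29:20 IPGR2
 * (E1000_TIPG_IPGR2_SHIFT == 20), hence the shift-and-OR sequence above.
 */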
1684
1685/**
1686 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1687 * @adapter: board private structure
1688 * @rxdr: rx descriptor ring (for a specific queue) to setup
1689 *
1690 * Returns 0 on success, negative on failure
1691 **/
1692static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1693 struct e1000_rx_ring *rxdr)
1694{
1695 struct pci_dev *pdev = adapter->pdev;
1696 int size, desc_len;
1697
1698 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1699 rxdr->buffer_info = vzalloc(size);
1700 if (!rxdr->buffer_info)
1701 return -ENOMEM;
1702
1703 desc_len = sizeof(struct e1000_rx_desc);
1704
1705 /* Round up to nearest 4K */
1706
1707 rxdr->size = rxdr->count * desc_len;
1708 rxdr->size = ALIGN(rxdr->size, 4096);
1709
1710 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1711 GFP_KERNEL);
1712 if (!rxdr->desc) {
1713setup_rx_desc_die:
1714 vfree(rxdr->buffer_info);
1715 return -ENOMEM;
1716 }
1717
1718 /* Fix for errata 23, can't cross 64kB boundary */
1719 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1720 void *olddesc = rxdr->desc;
1721 dma_addr_t olddma = rxdr->dma;
1722 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1723 rxdr->size, rxdr->desc);
1724 /* Try again, without freeing the previous */
1725 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1726 &rxdr->dma, GFP_KERNEL);
1727 /* Failed allocation, critical failure */
1728 if (!rxdr->desc) {
1729 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1730 olddma);
1731 goto setup_rx_desc_die;
1732 }
1733
1734 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1735 /* give up */
1736 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1737 rxdr->dma);
1738 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1739 olddma);
1740 e_err(probe, "Unable to allocate aligned memory for "
1741 "the Rx descriptor ring\n");
1742 goto setup_rx_desc_die;
1743 } else {
1744 /* Free old allocation, new allocation was successful */
1745 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746 olddma);
1747 }
1748 }
1749 memset(rxdr->desc, 0, rxdr->size);
1750
1751 rxdr->next_to_clean = 0;
1752 rxdr->next_to_use = 0;
1753 rxdr->rx_skb_top = NULL;
1754
1755 return 0;
1756}
1757
1758/**
1759 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1760 * (Descriptors) for all queues
1761 * @adapter: board private structure
1762 *
1763 * Return 0 on success, negative on failure
1764 **/
1765int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1766{
1767 int i, err = 0;
1768
1769 for (i = 0; i < adapter->num_rx_queues; i++) {
1770 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1771 if (err) {
1772 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1773 for (i-- ; i >= 0; i--)
1774 e1000_free_rx_resources(adapter,
1775 &adapter->rx_ring[i]);
1776 break;
1777 }
1778 }
1779
1780 return err;
1781}
1782
1783/**
1784 * e1000_setup_rctl - configure the receive control registers
1785 * @adapter: Board private structure
1786 **/
1787static void e1000_setup_rctl(struct e1000_adapter *adapter)
1788{
1789 struct e1000_hw *hw = &adapter->hw;
1790 u32 rctl;
1791
1792 rctl = er32(RCTL);
1793
1794 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1795
1796 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1797 E1000_RCTL_RDMTS_HALF |
1798 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1799
1800 if (hw->tbi_compatibility_on == 1)
1801 rctl |= E1000_RCTL_SBP;
1802 else
1803 rctl &= ~E1000_RCTL_SBP;
1804
1805 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1806 rctl &= ~E1000_RCTL_LPE;
1807 else
1808 rctl |= E1000_RCTL_LPE;
1809
1810 /* Setup buffer sizes */
1811 rctl &= ~E1000_RCTL_SZ_4096;
1812 rctl |= E1000_RCTL_BSEX;
1813 switch (adapter->rx_buffer_len) {
1814 case E1000_RXBUFFER_2048:
1815 default:
1816 rctl |= E1000_RCTL_SZ_2048;
1817 rctl &= ~E1000_RCTL_BSEX;
1818 break;
1819 case E1000_RXBUFFER_4096:
1820 rctl |= E1000_RCTL_SZ_4096;
1821 break;
1822 case E1000_RXBUFFER_8192:
1823 rctl |= E1000_RCTL_SZ_8192;
1824 break;
1825 case E1000_RXBUFFER_16384:
1826 rctl |= E1000_RCTL_SZ_16384;
1827 break;
1828 }
1829
1830 /* This is useful for sniffing bad packets. */
1831 if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by the normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
1835 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1836 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1837 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1838
1839 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1840 E1000_RCTL_DPF | /* Allow filtered pause */
1841 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1842 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1843 * and that breaks VLANs.
1844 */
1845 }
1846
1847 ew32(RCTL, rctl);
1848}
1849
1850/**
1851 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1852 * @adapter: board private structure
1853 *
1854 * Configure the Rx unit of the MAC after a reset.
1855 **/
1856static void e1000_configure_rx(struct e1000_adapter *adapter)
1857{
1858 u64 rdba;
1859 struct e1000_hw *hw = &adapter->hw;
1860 u32 rdlen, rctl, rxcsum;
1861
	rdlen = adapter->rx_ring[0].count * sizeof(struct e1000_rx_desc);

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}
1873
1874 /* disable receives while setting up the descriptors */
1875 rctl = er32(RCTL);
1876 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1877
1878 /* set the Receive Delay Timer Register */
1879 ew32(RDTR, adapter->rx_int_delay);
1880
1881 if (hw->mac_type >= e1000_82540) {
1882 ew32(RADV, adapter->rx_abs_int_delay);
1883 if (adapter->itr_setting != 0)
1884 ew32(ITR, 1000000000 / (adapter->itr * 256));
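		/* ITR counts in 256 ns units, so 1000000000 / (itr * 256)
		 * converts an interrupts-per-second target into an interval;
		 * e.g. itr = 8000 writes ~488, i.e. ~125 us between
		 * interrupts.
		 */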
1885 }
1886
1887 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1888 * the Base and Length of the Rx Descriptor Ring
1889 */
1890 switch (adapter->num_rx_queues) {
1891 case 1:
1892 default:
1893 rdba = adapter->rx_ring[0].dma;
1894 ew32(RDLEN, rdlen);
1895 ew32(RDBAH, (rdba >> 32));
1896 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1897 ew32(RDT, 0);
1898 ew32(RDH, 0);
1899 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1900 E1000_RDH : E1000_82542_RDH);
1901 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1902 E1000_RDT : E1000_82542_RDT);
1903 break;
1904 }
1905
1906 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1907 if (hw->mac_type >= e1000_82543) {
1908 rxcsum = er32(RXCSUM);
1909 if (adapter->rx_csum)
1910 rxcsum |= E1000_RXCSUM_TUOFL;
1911 else
1912 /* don't need to clear IPPCSE as it defaults to 0 */
1913 rxcsum &= ~E1000_RXCSUM_TUOFL;
1914 ew32(RXCSUM, rxcsum);
1915 }
1916
1917 /* Enable Receives */
1918 ew32(RCTL, rctl | E1000_RCTL_EN);
1919}
1920
1921/**
1922 * e1000_free_tx_resources - Free Tx Resources per Queue
1923 * @adapter: board private structure
1924 * @tx_ring: Tx descriptor ring for a specific queue
1925 *
1926 * Free all transmit software resources
1927 **/
1928static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1929 struct e1000_tx_ring *tx_ring)
1930{
1931 struct pci_dev *pdev = adapter->pdev;
1932
1933 e1000_clean_tx_ring(adapter, tx_ring);
1934
1935 vfree(tx_ring->buffer_info);
1936 tx_ring->buffer_info = NULL;
1937
1938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1939 tx_ring->dma);
1940
1941 tx_ring->desc = NULL;
1942}
1943
1944/**
1945 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1946 * @adapter: board private structure
1947 *
1948 * Free all transmit software resources
1949 **/
1950void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1951{
1952 int i;
1953
1954 for (i = 0; i < adapter->num_tx_queues; i++)
1955 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1956}
1957
1958static void
1959e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1960 struct e1000_tx_buffer *buffer_info)
1961{
1962 if (buffer_info->dma) {
1963 if (buffer_info->mapped_as_page)
1964 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length, DMA_TO_DEVICE);
1966 else
1967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1968 buffer_info->length,
1969 DMA_TO_DEVICE);
1970 buffer_info->dma = 0;
1971 }
1972 if (buffer_info->skb) {
1973 dev_kfree_skb_any(buffer_info->skb);
1974 buffer_info->skb = NULL;
1975 }
1976 buffer_info->time_stamp = 0;
1977 /* buffer_info must be completely set up in the transmit path */
1978}
1979
1980/**
1981 * e1000_clean_tx_ring - Free Tx Buffers
1982 * @adapter: board private structure
1983 * @tx_ring: ring to be cleaned
1984 **/
1985static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1986 struct e1000_tx_ring *tx_ring)
1987{
1988 struct e1000_hw *hw = &adapter->hw;
1989 struct e1000_tx_buffer *buffer_info;
1990 unsigned long size;
1991 unsigned int i;
1992
1993 /* Free all the Tx ring sk_buffs */
1994
1995 for (i = 0; i < tx_ring->count; i++) {
1996 buffer_info = &tx_ring->buffer_info[i];
1997 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1998 }
1999
2000 netdev_reset_queue(adapter->netdev);
2001 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2002 memset(tx_ring->buffer_info, 0, size);
2003
2004 /* Zero out the descriptor ring */
2005
2006 memset(tx_ring->desc, 0, tx_ring->size);
2007
2008 tx_ring->next_to_use = 0;
2009 tx_ring->next_to_clean = 0;
2010 tx_ring->last_tx_tso = false;
2011
2012 writel(0, hw->hw_addr + tx_ring->tdh);
2013 writel(0, hw->hw_addr + tx_ring->tdt);
2014}
2015
2016/**
2017 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2018 * @adapter: board private structure
2019 **/
2020static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2021{
2022 int i;
2023
2024 for (i = 0; i < adapter->num_tx_queues; i++)
2025 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2026}
2027
2028/**
2029 * e1000_free_rx_resources - Free Rx Resources
2030 * @adapter: board private structure
2031 * @rx_ring: ring to clean the resources from
2032 *
2033 * Free all receive software resources
2034 **/
2035static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2036 struct e1000_rx_ring *rx_ring)
2037{
2038 struct pci_dev *pdev = adapter->pdev;
2039
2040 e1000_clean_rx_ring(adapter, rx_ring);
2041
2042 vfree(rx_ring->buffer_info);
2043 rx_ring->buffer_info = NULL;
2044
2045 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2046 rx_ring->dma);
2047
2048 rx_ring->desc = NULL;
2049}
2050
2051/**
2052 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2053 * @adapter: board private structure
2054 *
2055 * Free all receive software resources
2056 **/
2057void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2058{
2059 int i;
2060
2061 for (i = 0; i < adapter->num_rx_queues; i++)
2062 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2063}
2064
2065#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
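/* An Rx frag must hold the E1000_HEADROOM, the Rx buffer itself, and
 * the struct skb_shared_info placed at the end when the buffer is later
 * wrapped in an skb; SKB_DATA_ALIGN keeps both pieces cache-line
 * aligned.
 */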
2066static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2067{
2068 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2069 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2070}
2071
2072static void *e1000_alloc_frag(const struct e1000_adapter *a)
2073{
2074 unsigned int len = e1000_frag_len(a);
2075 u8 *data = netdev_alloc_frag(len);
2076
2077 if (likely(data))
2078 data += E1000_HEADROOM;
2079 return data;
2080}
2081
2082/**
2083 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2084 * @adapter: board private structure
2085 * @rx_ring: ring to free buffers from
2086 **/
2087static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2088 struct e1000_rx_ring *rx_ring)
2089{
2090 struct e1000_hw *hw = &adapter->hw;
2091 struct e1000_rx_buffer *buffer_info;
2092 struct pci_dev *pdev = adapter->pdev;
2093 unsigned long size;
2094 unsigned int i;
2095
	/* Free all the Rx buffers: normal receives use netdev frags,
	 * jumbo receives use whole pages
	 */
2097 for (i = 0; i < rx_ring->count; i++) {
2098 buffer_info = &rx_ring->buffer_info[i];
2099 if (adapter->clean_rx == e1000_clean_rx_irq) {
2100 if (buffer_info->dma)
2101 dma_unmap_single(&pdev->dev, buffer_info->dma,
2102 adapter->rx_buffer_len,
2103 DMA_FROM_DEVICE);
2104 if (buffer_info->rxbuf.data) {
2105 skb_free_frag(buffer_info->rxbuf.data);
2106 buffer_info->rxbuf.data = NULL;
2107 }
2108 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2109 if (buffer_info->dma)
2110 dma_unmap_page(&pdev->dev, buffer_info->dma,
2111 adapter->rx_buffer_len,
2112 DMA_FROM_DEVICE);
2113 if (buffer_info->rxbuf.page) {
2114 put_page(buffer_info->rxbuf.page);
2115 buffer_info->rxbuf.page = NULL;
2116 }
2117 }
2118
2119 buffer_info->dma = 0;
2120 }
2121
2122 /* there also may be some cached data from a chained receive */
2123 napi_free_frags(&adapter->napi);
2124 rx_ring->rx_skb_top = NULL;
2125
2126 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2127 memset(rx_ring->buffer_info, 0, size);
2128
2129 /* Zero out the descriptor ring */
2130 memset(rx_ring->desc, 0, rx_ring->size);
2131
2132 rx_ring->next_to_clean = 0;
2133 rx_ring->next_to_use = 0;
2134
2135 writel(0, hw->hw_addr + rx_ring->rdh);
2136 writel(0, hw->hw_addr + rx_ring->rdt);
2137}
2138
2139/**
2140 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2141 * @adapter: board private structure
2142 **/
2143static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2144{
2145 int i;
2146
2147 for (i = 0; i < adapter->num_rx_queues; i++)
2148 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2149}
2150
2151/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2152 * and memory write and invalidate disabled for certain operations
2153 */
2154static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2155{
2156 struct e1000_hw *hw = &adapter->hw;
2157 struct net_device *netdev = adapter->netdev;
2158 u32 rctl;
2159
2160 e1000_pci_clear_mwi(hw);
2161
2162 rctl = er32(RCTL);
2163 rctl |= E1000_RCTL_RST;
2164 ew32(RCTL, rctl);
2165 E1000_WRITE_FLUSH();
2166 mdelay(5);
2167
2168 if (netif_running(netdev))
2169 e1000_clean_all_rx_rings(adapter);
2170}
2171
2172static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2173{
2174 struct e1000_hw *hw = &adapter->hw;
2175 struct net_device *netdev = adapter->netdev;
2176 u32 rctl;
2177
2178 rctl = er32(RCTL);
2179 rctl &= ~E1000_RCTL_RST;
2180 ew32(RCTL, rctl);
2181 E1000_WRITE_FLUSH();
2182 mdelay(5);
2183
2184 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2185 e1000_pci_set_mwi(hw);
2186
2187 if (netif_running(netdev)) {
2188 /* No need to loop, because 82542 supports only 1 queue */
2189 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2190 e1000_configure_rx(adapter);
2191 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2192 }
2193}
2194
2195/**
2196 * e1000_set_mac - Change the Ethernet Address of the NIC
2197 * @netdev: network interface device structure
2198 * @p: pointer to an address structure
2199 *
2200 * Returns 0 on success, negative on failure
2201 **/
2202static int e1000_set_mac(struct net_device *netdev, void *p)
2203{
2204 struct e1000_adapter *adapter = netdev_priv(netdev);
2205 struct e1000_hw *hw = &adapter->hw;
2206 struct sockaddr *addr = p;
2207
2208 if (!is_valid_ether_addr(addr->sa_data))
2209 return -EADDRNOTAVAIL;
2210
2211 /* 82542 2.0 needs to be in reset to write receive address registers */
2212
2213 if (hw->mac_type == e1000_82542_rev2_0)
2214 e1000_enter_82542_rst(adapter);
2215
2216 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2217 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2218
2219 e1000_rar_set(hw, hw->mac_addr, 0);
2220
2221 if (hw->mac_type == e1000_82542_rev2_0)
2222 e1000_leave_82542_rst(adapter);
2223
2224 return 0;
2225}
2226
2227/**
2228 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2229 * @netdev: network interface device structure
2230 *
2231 * The set_rx_mode entry point is called whenever the unicast or multicast
2232 * address lists or the network interface flags are updated. This routine is
2233 * responsible for configuring the hardware for proper unicast, multicast,
2234 * promiscuous mode, and all-multi behavior.
2235 **/
2236static void e1000_set_rx_mode(struct net_device *netdev)
2237{
2238 struct e1000_adapter *adapter = netdev_priv(netdev);
2239 struct e1000_hw *hw = &adapter->hw;
2240 struct netdev_hw_addr *ha;
2241 bool use_uc = false;
2242 u32 rctl;
2243 u32 hash_value;
2244 int i, rar_entries = E1000_RAR_ENTRIES;
2245 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
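	/* ndo_set_rx_mode runs with the netdev address lock held, so the
	 * scratch copy of the hash table must be allocated atomically
	 */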
2246 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2247
2248 if (!mcarray)
2249 return;
2250
2251 /* Check for Promiscuous and All Multicast modes */
2252
2253 rctl = er32(RCTL);
2254
2255 if (netdev->flags & IFF_PROMISC) {
2256 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2257 rctl &= ~E1000_RCTL_VFE;
2258 } else {
2259 if (netdev->flags & IFF_ALLMULTI)
2260 rctl |= E1000_RCTL_MPE;
2261 else
2262 rctl &= ~E1000_RCTL_MPE;
2263 /* Enable VLAN filter if there is a VLAN */
2264 if (e1000_vlan_used(adapter))
2265 rctl |= E1000_RCTL_VFE;
2266 }
2267
2268 if (netdev_uc_count(netdev) > rar_entries - 1) {
2269 rctl |= E1000_RCTL_UPE;
2270 } else if (!(netdev->flags & IFF_PROMISC)) {
2271 rctl &= ~E1000_RCTL_UPE;
2272 use_uc = true;
2273 }
2274
2275 ew32(RCTL, rctl);
2276
2277 /* 82542 2.0 needs to be in reset to write receive address registers */
2278
2279 if (hw->mac_type == e1000_82542_rev2_0)
2280 e1000_enter_82542_rst(adapter);
2281
	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address.
	 * If there are fewer than 14 addresses, go ahead and clear the
	 * remaining filters.
	 */
2289 i = 1;
2290 if (use_uc)
2291 netdev_for_each_uc_addr(ha, netdev) {
2292 if (i == rar_entries)
2293 break;
2294 e1000_rar_set(hw, ha->addr, i++);
2295 }
2296
2297 netdev_for_each_mc_addr(ha, netdev) {
2298 if (i == rar_entries) {
2299 /* load any remaining addresses into the hash table */
2300 u32 hash_reg, hash_bit, mta;
2301 hash_value = e1000_hash_mc_addr(hw, ha->addr);
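			/* the low 5 bits of the hash select a bit within
			 * one of the 128 32-bit MTA registers; the next
			 * 7 bits select the register itself
			 */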
2302 hash_reg = (hash_value >> 5) & 0x7F;
2303 hash_bit = hash_value & 0x1F;
2304 mta = (1 << hash_bit);
2305 mcarray[hash_reg] |= mta;
2306 } else {
2307 e1000_rar_set(hw, ha->addr, i++);
2308 }
2309 }
2310
2311 for (; i < rar_entries; i++) {
2312 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2313 E1000_WRITE_FLUSH();
2314 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2315 E1000_WRITE_FLUSH();
2316 }
2317
	/* Write the hash table completely. Writing from the bottom up
	 * avoids both broken write-combining chipsets and having to
	 * flush each individual write.
	 */
2321 for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* The 82544 has an errata where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first.
		 */
2327 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2328 }
2329 E1000_WRITE_FLUSH();
2330
2331 if (hw->mac_type == e1000_82542_rev2_0)
2332 e1000_leave_82542_rst(adapter);
2333
2334 kfree(mcarray);
2335}
2336
2337/**
2338 * e1000_update_phy_info_task - get phy info
2339 * @work: work struct contained inside adapter struct
2340 *
2341 * Need to wait a few seconds after link up to get diagnostic information from
2342 * the phy
2343 */
2344static void e1000_update_phy_info_task(struct work_struct *work)
2345{
2346 struct e1000_adapter *adapter = container_of(work,
2347 struct e1000_adapter,
2348 phy_info_task.work);
2349
2350 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2351}
2352
2353/**
2354 * e1000_82547_tx_fifo_stall_task - task to complete work
2355 * @work: work struct contained inside adapter struct
2356 **/
2357static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2358{
2359 struct e1000_adapter *adapter = container_of(work,
2360 struct e1000_adapter,
2361 fifo_stall_task.work);
2362 struct e1000_hw *hw = &adapter->hw;
2363 struct net_device *netdev = adapter->netdev;
2364 u32 tctl;
2365
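	/* Transmits were halted by the 82547 FIFO workaround. Once both
	 * the descriptor ring and the on-chip FIFO have fully drained
	 * (heads equal tails), rewind the FIFO pointers to the start and
	 * wake the queue.
	 */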
2366 if (atomic_read(&adapter->tx_fifo_stall)) {
2367 if ((er32(TDT) == er32(TDH)) &&
2368 (er32(TDFT) == er32(TDFH)) &&
2369 (er32(TDFTS) == er32(TDFHS))) {
2370 tctl = er32(TCTL);
2371 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2372 ew32(TDFT, adapter->tx_head_addr);
2373 ew32(TDFH, adapter->tx_head_addr);
2374 ew32(TDFTS, adapter->tx_head_addr);
2375 ew32(TDFHS, adapter->tx_head_addr);
2376 ew32(TCTL, tctl);
2377 E1000_WRITE_FLUSH();
2378
2379 adapter->tx_fifo_head = 0;
2380 atomic_set(&adapter->tx_fifo_stall, 0);
2381 netif_wake_queue(netdev);
2382 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2383 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2384 }
2385 }
2386}
2387
2388bool e1000_has_link(struct e1000_adapter *adapter)
2389{
2390 struct e1000_hw *hw = &adapter->hw;
2391 bool link_active = false;
2392
	/* get_link_status is set on an LSC (link status change) interrupt
	 * or an rx sequence error interrupt (except on Intel ce4100).
	 * It remains set, and link is reported down, until
	 * e1000_check_for_link establishes link; this applies to copper
	 * adapters ONLY.
	 */
2399 switch (hw->media_type) {
2400 case e1000_media_type_copper:
2401 if (hw->mac_type == e1000_ce4100)
2402 hw->get_link_status = 1;
2403 if (hw->get_link_status) {
2404 e1000_check_for_link(hw);
2405 link_active = !hw->get_link_status;
2406 } else {
2407 link_active = true;
2408 }
2409 break;
2410 case e1000_media_type_fiber:
2411 e1000_check_for_link(hw);
2412 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2413 break;
2414 case e1000_media_type_internal_serdes:
2415 e1000_check_for_link(hw);
2416 link_active = hw->serdes_has_link;
2417 break;
2418 default:
2419 break;
2420 }
2421
2422 return link_active;
2423}
2424
2425/**
2426 * e1000_watchdog - work function
2427 * @work: work struct contained inside adapter struct
2428 **/
2429static void e1000_watchdog(struct work_struct *work)
2430{
2431 struct e1000_adapter *adapter = container_of(work,
2432 struct e1000_adapter,
2433 watchdog_task.work);
2434 struct e1000_hw *hw = &adapter->hw;
2435 struct net_device *netdev = adapter->netdev;
2436 struct e1000_tx_ring *txdr = adapter->tx_ring;
2437 u32 link, tctl;
2438
2439 link = e1000_has_link(adapter);
2440 if ((netif_carrier_ok(netdev)) && link)
2441 goto link_up;
2442
2443 if (link) {
2444 if (!netif_carrier_ok(netdev)) {
2445 u32 ctrl;
2446 bool txb2b = true;
2447 /* update snapshot of PHY registers on LSC */
2448 e1000_get_speed_and_duplex(hw,
2449 &adapter->link_speed,
2450 &adapter->link_duplex);
2451
2452 ctrl = er32(CTRL);
2453 pr_info("%s NIC Link is Up %d Mbps %s, "
2454 "Flow Control: %s\n",
2455 netdev->name,
2456 adapter->link_speed,
2457 adapter->link_duplex == FULL_DUPLEX ?
2458 "Full Duplex" : "Half Duplex",
2459 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2460 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2461 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2462 E1000_CTRL_TFCE) ? "TX" : "None")));
2463
2464 /* adjust timeout factor according to speed/duplex */
2465 adapter->tx_timeout_factor = 1;
2466 switch (adapter->link_speed) {
2467 case SPEED_10:
2468 txb2b = false;
2469 adapter->tx_timeout_factor = 16;
2470 break;
2471 case SPEED_100:
2472 txb2b = false;
2473 /* maybe add some timeout factor ? */
2474 break;
2475 }
2476
2477 /* enable transmits in the hardware */
2478 tctl = er32(TCTL);
2479 tctl |= E1000_TCTL_EN;
2480 ew32(TCTL, tctl);
2481
2482 netif_carrier_on(netdev);
2483 if (!test_bit(__E1000_DOWN, &adapter->flags))
2484 schedule_delayed_work(&adapter->phy_info_task,
2485 2 * HZ);
2486 adapter->smartspeed = 0;
2487 }
2488 } else {
2489 if (netif_carrier_ok(netdev)) {
2490 adapter->link_speed = 0;
2491 adapter->link_duplex = 0;
2492 pr_info("%s NIC Link is Down\n",
2493 netdev->name);
2494 netif_carrier_off(netdev);
2495
2496 if (!test_bit(__E1000_DOWN, &adapter->flags))
2497 schedule_delayed_work(&adapter->phy_info_task,
2498 2 * HZ);
2499 }
2500
2501 e1000_smartspeed(adapter);
2502 }
2503
2504link_up:
2505 e1000_update_stats(adapter);
2506
2507 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2508 adapter->tpt_old = adapter->stats.tpt;
2509 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2510 adapter->colc_old = adapter->stats.colc;
2511
2512 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2513 adapter->gorcl_old = adapter->stats.gorcl;
2514 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2515 adapter->gotcl_old = adapter->stats.gotcl;
2516
2517 e1000_update_adaptive(hw);
2518
2519 if (!netif_carrier_ok(netdev)) {
2520 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2521 /* We've lost link, so the controller stops DMA,
2522 * but we've got queued Tx work that's never going
2523 * to get done, so reset controller to flush Tx.
2524 * (Do the reset outside of interrupt context).
2525 */
2526 adapter->tx_timeout_count++;
2527 schedule_work(&adapter->reset_task);
2528 /* exit immediately since reset is imminent */
2529 return;
2530 }
2531 }
2532
2533 /* Simple mode for Interrupt Throttle Rate (ITR) */
2534 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2535 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2536 * Total asymmetrical Tx or Rx gets ITR=8000;
2537 * everyone else is between 2000-8000.
2538 */
2539 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2540 u32 dif = (adapter->gotcl > adapter->gorcl ?
2541 adapter->gotcl - adapter->gorcl :
2542 adapter->gorcl - adapter->gotcl) / 10000;
2543 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2544
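		/* the ITR register takes the inter-interrupt interval in
		 * 256ns units, so convert from interrupts/sec
		 */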
2545 ew32(ITR, 1000000000 / (itr * 256));
2546 }
2547
2548 /* Cause software interrupt to ensure rx ring is cleaned */
2549 ew32(ICS, E1000_ICS_RXDMT0);
2550
2551 /* Force detection of hung controller every watchdog period */
2552 adapter->detect_tx_hung = true;
2553
2554 /* Reschedule the task */
2555 if (!test_bit(__E1000_DOWN, &adapter->flags))
2556 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2557}
2558
2559enum latency_range {
2560 lowest_latency = 0,
2561 low_latency = 1,
2562 bulk_latency = 2,
2563 latency_invalid = 255
2564};
2565
2566/**
2567 * e1000_update_itr - update the dynamic ITR value based on statistics
2568 * @adapter: pointer to adapter
2569 * @itr_setting: current adapter->itr
2570 * @packets: the number of packets during this measurement interval
2571 * @bytes: the number of bytes during this measurement interval
2572 *
 * Returns a new ITR value based on the packet and byte
 * counts during the last interrupt. The advantage of per-interrupt
 * computation is faster updates and a more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed, and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c).
2582 **/
2583static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2584 u16 itr_setting, int packets, int bytes)
2585{
2586 unsigned int retval = itr_setting;
2587 struct e1000_hw *hw = &adapter->hw;
2588
2589 if (unlikely(hw->mac_type < e1000_82540))
2590 goto update_itr_done;
2591
2592 if (packets == 0)
2593 goto update_itr_done;
2594
2595 switch (itr_setting) {
2596 case lowest_latency:
		/* jumbo frames get bulk treatment */
2598 if (bytes/packets > 8000)
2599 retval = bulk_latency;
2600 else if ((packets < 5) && (bytes > 512))
2601 retval = low_latency;
2602 break;
2603 case low_latency: /* 50 usec aka 20000 ints/s */
2604 if (bytes > 10000) {
2605 /* jumbo frames need bulk latency setting */
2606 if (bytes/packets > 8000)
2607 retval = bulk_latency;
2608 else if ((packets < 10) || ((bytes/packets) > 1200))
2609 retval = bulk_latency;
			else if (packets > 35)
2611 retval = lowest_latency;
2612 } else if (bytes/packets > 2000)
2613 retval = bulk_latency;
2614 else if (packets <= 2 && bytes < 512)
2615 retval = lowest_latency;
2616 break;
2617 case bulk_latency: /* 250 usec aka 4000 ints/s */
2618 if (bytes > 25000) {
2619 if (packets > 35)
2620 retval = low_latency;
2621 } else if (bytes < 6000) {
2622 retval = low_latency;
2623 }
2624 break;
2625 }
2626
2627update_itr_done:
2628 return retval;
2629}
2630
2631static void e1000_set_itr(struct e1000_adapter *adapter)
2632{
2633 struct e1000_hw *hw = &adapter->hw;
2634 u16 current_itr;
2635 u32 new_itr = adapter->itr;
2636
2637 if (unlikely(hw->mac_type < e1000_82540))
2638 return;
2639
2640 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2641 if (unlikely(adapter->link_speed != SPEED_1000)) {
2642 current_itr = 0;
2643 new_itr = 4000;
2644 goto set_itr_now;
2645 }
2646
2647 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2648 adapter->total_tx_packets,
2649 adapter->total_tx_bytes);
2650 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2651 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2652 adapter->tx_itr = low_latency;
2653
2654 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2655 adapter->total_rx_packets,
2656 adapter->total_rx_bytes);
2657 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2658 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2659 adapter->rx_itr = low_latency;
2660
2661 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2662
2663 switch (current_itr) {
2664 /* counts and packets in update_itr are dependent on these numbers */
2665 case lowest_latency:
2666 new_itr = 70000;
2667 break;
2668 case low_latency:
2669 new_itr = 20000; /* aka hwitr = ~200 */
2670 break;
2671 case bulk_latency:
2672 new_itr = 4000;
2673 break;
2674 default:
2675 break;
2676 }
2677
2678set_itr_now:
2679 if (new_itr != adapter->itr) {
2680 /* this attempts to bias the interrupt rate towards Bulk
2681 * by adding intermediate steps when interrupt rate is
2682 * increasing
2683 */
2684 new_itr = new_itr > adapter->itr ?
2685 min(adapter->itr + (new_itr >> 2), new_itr) :
2686 new_itr;
2687 adapter->itr = new_itr;
2688 ew32(ITR, 1000000000 / (new_itr * 256));
2689 }
2690}
2691
2692#define E1000_TX_FLAGS_CSUM 0x00000001
2693#define E1000_TX_FLAGS_VLAN 0x00000002
2694#define E1000_TX_FLAGS_TSO 0x00000004
2695#define E1000_TX_FLAGS_IPV4 0x00000008
2696#define E1000_TX_FLAGS_NO_FCS 0x00000010
2697#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2698#define E1000_TX_FLAGS_VLAN_SHIFT 16
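/* The low bits of tx_flags are individual flags; the upper 16 bits
 * carry the 802.1Q tag inserted into the descriptor when
 * E1000_TX_FLAGS_VLAN is set.
 */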
2699
2700static int e1000_tso(struct e1000_adapter *adapter,
2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2702 __be16 protocol)
2703{
2704 struct e1000_context_desc *context_desc;
2705 struct e1000_tx_buffer *buffer_info;
2706 unsigned int i;
2707 u32 cmd_length = 0;
2708 u16 ipcse = 0, tucse, mss;
2709 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2710
2711 if (skb_is_gso(skb)) {
2712 int err;
2713
2714 err = skb_cow_head(skb, 0);
2715 if (err < 0)
2716 return err;
2717
2718 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2719 mss = skb_shinfo(skb)->gso_size;
2720 if (protocol == htons(ETH_P_IP)) {
2721 struct iphdr *iph = ip_hdr(skb);
2722 iph->tot_len = 0;
2723 iph->check = 0;
2724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2725 iph->daddr, 0,
2726 IPPROTO_TCP,
2727 0);
2728 cmd_length = E1000_TXD_CMD_IP;
2729 ipcse = skb_transport_offset(skb) - 1;
2730 } else if (skb_is_gso_v6(skb)) {
2731 ipv6_hdr(skb)->payload_len = 0;
2732 tcp_hdr(skb)->check =
2733 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2734 &ipv6_hdr(skb)->daddr,
2735 0, IPPROTO_TCP, 0);
2736 ipcse = 0;
2737 }
2738 ipcss = skb_network_offset(skb);
2739 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2740 tucss = skb_transport_offset(skb);
2741 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2742 tucse = 0;
2743
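		/* PAYLEN (the low 20 bits of cmd_and_length) is the TSO
		 * payload size: the frame length minus the headers that
		 * the hardware replicates for every segment
		 */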
2744 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2745 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2746
2747 i = tx_ring->next_to_use;
2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2749 buffer_info = &tx_ring->buffer_info[i];
2750
2751 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2752 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2753 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2754 context_desc->upper_setup.tcp_fields.tucss = tucss;
2755 context_desc->upper_setup.tcp_fields.tucso = tucso;
2756 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2757 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2758 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2759 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2760
2761 buffer_info->time_stamp = jiffies;
2762 buffer_info->next_to_watch = i;
2763
2764 if (++i == tx_ring->count)
2765 i = 0;
2766
2767 tx_ring->next_to_use = i;
2768
2769 return true;
2770 }
2771 return false;
2772}
2773
2774static bool e1000_tx_csum(struct e1000_adapter *adapter,
2775 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2776 __be16 protocol)
2777{
2778 struct e1000_context_desc *context_desc;
2779 struct e1000_tx_buffer *buffer_info;
2780 unsigned int i;
2781 u8 css;
2782 u32 cmd_len = E1000_TXD_CMD_DEXT;
2783
2784 if (skb->ip_summed != CHECKSUM_PARTIAL)
2785 return false;
2786
2787 switch (protocol) {
2788 case cpu_to_be16(ETH_P_IP):
2789 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2790 cmd_len |= E1000_TXD_CMD_TCP;
2791 break;
2792 case cpu_to_be16(ETH_P_IPV6):
2793 /* XXX not handling all IPV6 headers */
2794 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2795 cmd_len |= E1000_TXD_CMD_TCP;
2796 break;
2797 default:
2798 if (unlikely(net_ratelimit()))
2799 e_warn(drv, "checksum_partial proto=%x!\n",
2800 skb->protocol);
2801 break;
2802 }
2803
2804 css = skb_checksum_start_offset(skb);
2805
2806 i = tx_ring->next_to_use;
2807 buffer_info = &tx_ring->buffer_info[i];
2808 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2809
2810 context_desc->lower_setup.ip_config = 0;
2811 context_desc->upper_setup.tcp_fields.tucss = css;
2812 context_desc->upper_setup.tcp_fields.tucso =
2813 css + skb->csum_offset;
2814 context_desc->upper_setup.tcp_fields.tucse = 0;
2815 context_desc->tcp_seg_setup.data = 0;
2816 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2817
2818 buffer_info->time_stamp = jiffies;
2819 buffer_info->next_to_watch = i;
2820
2821 if (unlikely(++i == tx_ring->count))
2822 i = 0;
2823
2824 tx_ring->next_to_use = i;
2825
2826 return true;
2827}
2828
2829#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1 << E1000_MAX_TXD_PWR)
2831
2832static int e1000_tx_map(struct e1000_adapter *adapter,
2833 struct e1000_tx_ring *tx_ring,
2834 struct sk_buff *skb, unsigned int first,
2835 unsigned int max_per_txd, unsigned int nr_frags,
2836 unsigned int mss)
2837{
2838 struct e1000_hw *hw = &adapter->hw;
2839 struct pci_dev *pdev = adapter->pdev;
2840 struct e1000_tx_buffer *buffer_info;
2841 unsigned int len = skb_headlen(skb);
2842 unsigned int offset = 0, size, count = 0, i;
2843 unsigned int f, bytecount, segs;
2844
2845 i = tx_ring->next_to_use;
2846
2847 while (len) {
2848 buffer_info = &tx_ring->buffer_info[i];
2849 size = min(len, max_per_txd);
2850 /* Workaround for Controller erratum --
2851 * descriptor for non-tso packet in a linear SKB that follows a
2852 * tso gets written back prematurely before the data is fully
2853 * DMA'd to the controller
2854 */
2855 if (!skb->data_len && tx_ring->last_tx_tso &&
2856 !skb_is_gso(skb)) {
2857 tx_ring->last_tx_tso = false;
2858 size -= 4;
2859 }
2860
2861 /* Workaround for premature desc write-backs
2862 * in TSO mode. Append 4-byte sentinel desc
2863 */
2864 if (unlikely(mss && !nr_frags && size == len && size > 8))
2865 size -= 4;
		/* Workaround for errata 10, which applies to all
		 * controllers in PCI-X mode.
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
		 */
2871 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2872 (size > 2015) && count == 0))
2873 size = 2015;
2874
2875 /* Workaround for potential 82544 hang in PCI-X. Avoid
2876 * terminating buffers within evenly-aligned dwords.
2877 */
2878 if (unlikely(adapter->pcix_82544 &&
2879 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2880 size > 4))
2881 size -= 4;
2882
2883 buffer_info->length = size;
2884 /* set time_stamp *before* dma to help avoid a possible race */
2885 buffer_info->time_stamp = jiffies;
2886 buffer_info->mapped_as_page = false;
2887 buffer_info->dma = dma_map_single(&pdev->dev,
2888 skb->data + offset,
2889 size, DMA_TO_DEVICE);
2890 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2891 goto dma_error;
2892 buffer_info->next_to_watch = i;
2893
2894 len -= size;
2895 offset += size;
2896 count++;
2897 if (len) {
2898 i++;
2899 if (unlikely(i == tx_ring->count))
2900 i = 0;
2901 }
2902 }
2903
2904 for (f = 0; f < nr_frags; f++) {
2905 const struct skb_frag_struct *frag;
2906
2907 frag = &skb_shinfo(skb)->frags[f];
2908 len = skb_frag_size(frag);
2909 offset = 0;
2910
2911 while (len) {
2912 unsigned long bufend;
2913 i++;
2914 if (unlikely(i == tx_ring->count))
2915 i = 0;
2916
2917 buffer_info = &tx_ring->buffer_info[i];
2918 size = min(len, max_per_txd);
2919 /* Workaround for premature desc write-backs
2920 * in TSO mode. Append 4-byte sentinel desc
2921 */
2922 if (unlikely(mss && f == (nr_frags-1) &&
2923 size == len && size > 8))
2924 size -= 4;
2925 /* Workaround for potential 82544 hang in PCI-X.
2926 * Avoid terminating buffers within evenly-aligned
2927 * dwords.
2928 */
2929 bufend = (unsigned long)
2930 page_to_phys(skb_frag_page(frag));
2931 bufend += offset + size - 1;
2932 if (unlikely(adapter->pcix_82544 &&
2933 !(bufend & 4) &&
2934 size > 4))
2935 size -= 4;
2936
2937 buffer_info->length = size;
2938 buffer_info->time_stamp = jiffies;
2939 buffer_info->mapped_as_page = true;
2940 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2941 offset, size, DMA_TO_DEVICE);
2942 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2943 goto dma_error;
2944 buffer_info->next_to_watch = i;
2945
2946 len -= size;
2947 offset += size;
2948 count++;
2949 }
2950 }
2951
2952 segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* total bytes on the wire: each of the (segs - 1) additional TSO
	 * segments repeats the headers, approximated here by skb_headlen()
	 */
2954 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2955
2956 tx_ring->buffer_info[i].skb = skb;
2957 tx_ring->buffer_info[i].segs = segs;
2958 tx_ring->buffer_info[i].bytecount = bytecount;
2959 tx_ring->buffer_info[first].next_to_watch = i;
2960
2961 return count;
2962
2963dma_error:
2964 dev_err(&pdev->dev, "TX DMA map failed\n");
2965 buffer_info->dma = 0;
2966 if (count)
2967 count--;
2968
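	/* unwind: walk back over the descriptors mapped so far, wrapping
	 * at the start of the ring, and release each mapping
	 */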
2969 while (count--) {
2970 if (i == 0)
2971 i += tx_ring->count;
2972 i--;
2973 buffer_info = &tx_ring->buffer_info[i];
2974 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2975 }
2976
2977 return 0;
2978}
2979
2980static void e1000_tx_queue(struct e1000_adapter *adapter,
2981 struct e1000_tx_ring *tx_ring, int tx_flags,
2982 int count)
2983{
2984 struct e1000_tx_desc *tx_desc = NULL;
2985 struct e1000_tx_buffer *buffer_info;
2986 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2987 unsigned int i;
2988
2989 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2990 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2991 E1000_TXD_CMD_TSE;
2992 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2993
2994 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2995 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2996 }
2997
2998 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2999 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3000 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3001 }
3002
3003 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3004 txd_lower |= E1000_TXD_CMD_VLE;
3005 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3006 }
3007
3008 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3009 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3010
3011 i = tx_ring->next_to_use;
3012
3013 while (count--) {
3014 buffer_info = &tx_ring->buffer_info[i];
3015 tx_desc = E1000_TX_DESC(*tx_ring, i);
3016 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3017 tx_desc->lower.data =
3018 cpu_to_le32(txd_lower | buffer_info->length);
3019 tx_desc->upper.data = cpu_to_le32(txd_upper);
3020 if (unlikely(++i == tx_ring->count))
3021 i = 0;
3022 }
3023
3024 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3025
3026 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3027 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3028 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3029
3030 /* Force memory writes to complete before letting h/w
3031 * know there are new descriptors to fetch. (Only
3032 * applicable for weak-ordered memory model archs,
3033 * such as IA-64).
3034 */
3035 wmb();
3036
3037 tx_ring->next_to_use = i;
3038}
3039
3040/* 82547 workaround to avoid controller hang in half-duplex environment.
3041 * The workaround is to avoid queuing a large packet that would span
3042 * the internal Tx FIFO ring boundary by notifying the stack to resend
3043 * the packet at a later time. This gives the Tx FIFO an opportunity to
3044 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3045 * to the beginning of the Tx FIFO.
3046 */
3047
3048#define E1000_FIFO_HDR 0x10
3049#define E1000_82547_PAD_LEN 0x3E0
3050
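/* Each packet consumes its own length plus a FIFO header, rounded up
 * to a multiple of E1000_FIFO_HDR. Flag a stall (so the FIFO can drain
 * and be rewound) when that padded length is at least
 * E1000_82547_PAD_LEN bytes beyond the free FIFO space.
 */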
3051static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3052 struct sk_buff *skb)
3053{
3054 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3055 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3056
3057 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3058
3059 if (adapter->link_duplex != HALF_DUPLEX)
3060 goto no_fifo_stall_required;
3061
3062 if (atomic_read(&adapter->tx_fifo_stall))
3063 return 1;
3064
3065 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3066 atomic_set(&adapter->tx_fifo_stall, 1);
3067 return 1;
3068 }
3069
3070no_fifo_stall_required:
3071 adapter->tx_fifo_head += skb_fifo_len;
3072 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3073 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3074 return 0;
3075}
3076
3077static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3078{
3079 struct e1000_adapter *adapter = netdev_priv(netdev);
3080 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3081
3082 netif_stop_queue(netdev);
3083 /* Herbert's original patch had:
3084 * smp_mb__after_netif_stop_queue();
3085 * but since that doesn't exist yet, just open code it.
3086 */
3087 smp_mb();
3088
	/* We need to check again in case another CPU has just
	 * made room available.
	 */
3092 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3093 return -EBUSY;
3094
3095 /* A reprieve! */
3096 netif_start_queue(netdev);
3097 ++adapter->restart_queue;
3098 return 0;
3099}
3100
3101static int e1000_maybe_stop_tx(struct net_device *netdev,
3102 struct e1000_tx_ring *tx_ring, int size)
3103{
3104 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3105 return 0;
3106 return __e1000_maybe_stop_tx(netdev, size);
3107}
3108
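/* number of descriptors needed to map S bytes when each descriptor
 * carries at most 2^X bytes, i.e. ceil(S / (1 << X)) done with shifts
 */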
3109#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3110static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev)
3112{
3113 struct e1000_adapter *adapter = netdev_priv(netdev);
3114 struct e1000_hw *hw = &adapter->hw;
3115 struct e1000_tx_ring *tx_ring;
3116 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3117 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3118 unsigned int tx_flags = 0;
3119 unsigned int len = skb_headlen(skb);
3120 unsigned int nr_frags;
3121 unsigned int mss;
3122 int count = 0;
3123 int tso;
3124 unsigned int f;
3125 __be16 protocol = vlan_get_protocol(skb);
3126
3127 /* This goes back to the question of how to logically map a Tx queue
3128 * to a flow. Right now, performance is impacted slightly negatively
3129 * if using multiple Tx queues. If the stack breaks away from a
3130 * single qdisc implementation, we can look at this again.
3131 */
3132 tx_ring = adapter->tx_ring;
3133
	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To work around this issue, pad all small packets manually.
	 */
3138 if (eth_skb_pad(skb))
3139 return NETDEV_TX_OK;
3140
3141 mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. It assumes that
	 * ceil(buffer len / mss) <= 4, so cap each buffer at
	 * 4 * mss to make sure we don't overrun the FIFO if
	 * mss drops.
	 */
3149 if (mss) {
3150 u8 hdr_len;
3151 max_per_txd = min(mss << 2, max_per_txd);
3152 max_txd_pwr = fls(max_per_txd) - 1;
3153
3154 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3155 if (skb->data_len && hdr_len == len) {
3156 switch (hw->mac_type) {
3157 unsigned int pull_size;
3158 case e1000_82544:
3159 /* Make sure we have room to chop off 4 bytes,
3160 * and that the end alignment will work out to
3161 * this hardware's requirements
3162 * NOTE: this is a TSO only workaround
3163 * if end byte alignment not correct move us
3164 * into the next dword
3165 */
3166 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3167 & 4)
3168 break;
			/* pull up to 4 bytes of paged data into the linear area */
3170 pull_size = min((unsigned int)4, skb->data_len);
3171 if (!__pskb_pull_tail(skb, pull_size)) {
3172 e_err(drv, "__pskb_pull_tail "
3173 "failed.\n");
3174 dev_kfree_skb_any(skb);
3175 return NETDEV_TX_OK;
3176 }
3177 len = skb_headlen(skb);
3178 break;
3179 default:
3180 /* do nothing */
3181 break;
3182 }
3183 }
3184 }
3185
3186 /* reserve a descriptor for the offload context */
3187 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3188 count++;
3189 count++;
3190
3191 /* Controller Erratum workaround */
3192 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3193 count++;
3194
3195 count += TXD_USE_COUNT(len, max_txd_pwr);
3196
3197 if (adapter->pcix_82544)
3198 count++;
3199
	/* Workaround for errata 10, which applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
3203 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3204 (len > 2015)))
3205 count++;
3206
3207 nr_frags = skb_shinfo(skb)->nr_frags;
3208 for (f = 0; f < nr_frags; f++)
3209 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3210 max_txd_pwr);
3211 if (adapter->pcix_82544)
3212 count += nr_frags;
3213
3214 /* need: count + 2 desc gap to keep tail from touching
3215 * head, otherwise try next time
3216 */
3217 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3218 return NETDEV_TX_BUSY;
3219
3220 if (unlikely((hw->mac_type == e1000_82547) &&
3221 (e1000_82547_fifo_workaround(adapter, skb)))) {
3222 netif_stop_queue(netdev);
3223 if (!test_bit(__E1000_DOWN, &adapter->flags))
3224 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3225 return NETDEV_TX_BUSY;
3226 }
3227
3228 if (skb_vlan_tag_present(skb)) {
3229 tx_flags |= E1000_TX_FLAGS_VLAN;
3230 tx_flags |= (skb_vlan_tag_get(skb) <<
3231 E1000_TX_FLAGS_VLAN_SHIFT);
3232 }
3233
3234 first = tx_ring->next_to_use;
3235
3236 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3237 if (tso < 0) {
3238 dev_kfree_skb_any(skb);
3239 return NETDEV_TX_OK;
3240 }
3241
3242 if (likely(tso)) {
3243 if (likely(hw->mac_type != e1000_82544))
3244 tx_ring->last_tx_tso = true;
3245 tx_flags |= E1000_TX_FLAGS_TSO;
3246 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3247 tx_flags |= E1000_TX_FLAGS_CSUM;
3248
3249 if (protocol == htons(ETH_P_IP))
3250 tx_flags |= E1000_TX_FLAGS_IPV4;
3251
3252 if (unlikely(skb->no_fcs))
3253 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3254
3255 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3256 nr_frags, mss);
3257
3258 if (count) {
		/* The number of descriptors needed is higher than in other
		 * Intel drivers due to a number of workarounds. The
		 * breakdown is below:
3261 * Data descriptors: MAX_SKB_FRAGS + 1
3262 * Context Descriptor: 1
3263 * Keep head from touching tail: 2
3264 * Workarounds: 3
3265 */
3266 int desc_needed = MAX_SKB_FRAGS + 7;
3267
3268 netdev_sent_queue(netdev, skb->len);
3269 skb_tx_timestamp(skb);
3270
3271 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3272
3273 /* 82544 potentially requires twice as many data descriptors
3274 * in order to guarantee buffers don't end on evenly-aligned
3275 * dwords
3276 */
3277 if (adapter->pcix_82544)
3278 desc_needed += MAX_SKB_FRAGS + 1;
3279
3280 /* Make sure there is space in the ring for the next send. */
3281 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3282
3283 if (!skb->xmit_more ||
3284 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3285 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
			/* we need this if more than one processor can write
			 * to our tail at a time; it synchronizes IO on
			 * IA64/Altix systems
			 */
3290 mmiowb();
3291 }
3292 } else {
3293 dev_kfree_skb_any(skb);
3294 tx_ring->buffer_info[first].time_stamp = 0;
3295 tx_ring->next_to_use = first;
3296 }
3297
3298 return NETDEV_TX_OK;
3299}
3300
3301#define NUM_REGS 38 /* 1 based count */
3302static void e1000_regdump(struct e1000_adapter *adapter)
3303{
3304 struct e1000_hw *hw = &adapter->hw;
3305 u32 regs[NUM_REGS];
3306 u32 *regs_buff = regs;
3307 int i = 0;
3308
3309 static const char * const reg_name[] = {
3310 "CTRL", "STATUS",
3311 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3312 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3313 "TIDV", "TXDCTL", "TADV", "TARC0",
3314 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3315 "TXDCTL1", "TARC1",
3316 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3317 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3318 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3319 };
3320
3321 regs_buff[0] = er32(CTRL);
3322 regs_buff[1] = er32(STATUS);
3323
3324 regs_buff[2] = er32(RCTL);
3325 regs_buff[3] = er32(RDLEN);
3326 regs_buff[4] = er32(RDH);
3327 regs_buff[5] = er32(RDT);
3328 regs_buff[6] = er32(RDTR);
3329
3330 regs_buff[7] = er32(TCTL);
3331 regs_buff[8] = er32(TDBAL);
3332 regs_buff[9] = er32(TDBAH);
3333 regs_buff[10] = er32(TDLEN);
3334 regs_buff[11] = er32(TDH);
3335 regs_buff[12] = er32(TDT);
3336 regs_buff[13] = er32(TIDV);
3337 regs_buff[14] = er32(TXDCTL);
3338 regs_buff[15] = er32(TADV);
3339 regs_buff[16] = er32(TARC0);
3340
3341 regs_buff[17] = er32(TDBAL1);
3342 regs_buff[18] = er32(TDBAH1);
3343 regs_buff[19] = er32(TDLEN1);
3344 regs_buff[20] = er32(TDH1);
3345 regs_buff[21] = er32(TDT1);
3346 regs_buff[22] = er32(TXDCTL1);
3347 regs_buff[23] = er32(TARC1);
3348 regs_buff[24] = er32(CTRL_EXT);
3349 regs_buff[25] = er32(ERT);
3350 regs_buff[26] = er32(RDBAL0);
3351 regs_buff[27] = er32(RDBAH0);
3352 regs_buff[28] = er32(TDFH);
3353 regs_buff[29] = er32(TDFT);
3354 regs_buff[30] = er32(TDFHS);
3355 regs_buff[31] = er32(TDFTS);
3356 regs_buff[32] = er32(TDFPC);
3357 regs_buff[33] = er32(RDFH);
3358 regs_buff[34] = er32(RDFT);
3359 regs_buff[35] = er32(RDFHS);
3360 regs_buff[36] = er32(RDFTS);
3361 regs_buff[37] = er32(RDFPC);
3362
3363 pr_info("Register dump\n");
3364 for (i = 0; i < NUM_REGS; i++)
3365 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3366}
3367
/**
 * e1000_dump - Print registers, tx ring and rx ring
 * @adapter: board private structure
 **/
3371static void e1000_dump(struct e1000_adapter *adapter)
3372{
3373 /* this code doesn't handle multiple rings */
3374 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3375 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3376 int i;
3377
3378 if (!netif_msg_hw(adapter))
3379 return;
3380
3381 /* Print Registers */
3382 e1000_regdump(adapter);
3383
3384 /* transmit dump */
3385 pr_info("TX Desc ring0 dump\n");
3386
3387 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3388 *
3389 * Legacy Transmit Descriptor
3390 * +--------------------------------------------------------------+
3391 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3392 * +--------------------------------------------------------------+
3393 * 8 | Special | CSS | Status | CMD | CSO | Length |
3394 * +--------------------------------------------------------------+
3395 * 63 48 47 36 35 32 31 24 23 16 15 0
3396 *
3397 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3398 * 63 48 47 40 39 32 31 16 15 8 7 0
3399 * +----------------------------------------------------------------+
3400 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3401 * +----------------------------------------------------------------+
3402 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3403 * +----------------------------------------------------------------+
3404 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3405 *
3406 * Extended Data Descriptor (DTYP=0x1)
3407 * +----------------------------------------------------------------+
3408 * 0 | Buffer Address [63:0] |
3409 * +----------------------------------------------------------------+
3410 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3411 * +----------------------------------------------------------------+
3412 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3413 */
3414 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3415 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3416
3417 if (!netif_msg_tx_done(adapter))
3418 goto rx_ring_summary;
3419
3420 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3421 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3422 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3423 struct my_u { __le64 a; __le64 b; };
3424 struct my_u *u = (struct my_u *)tx_desc;
3425 const char *type;
3426
3427 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3428 type = "NTC/U";
3429 else if (i == tx_ring->next_to_use)
3430 type = "NTU";
3431 else if (i == tx_ring->next_to_clean)
3432 type = "NTC";
3433 else
3434 type = "";
3435
3436 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3437 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3438 le64_to_cpu(u->a), le64_to_cpu(u->b),
3439 (u64)buffer_info->dma, buffer_info->length,
3440 buffer_info->next_to_watch,
3441 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3442 }
3443
3444rx_ring_summary:
3445 /* receive dump */
3446 pr_info("\nRX Desc ring dump\n");
3447
3448 /* Legacy Receive Descriptor Format
3449 *
3450 * +-----------------------------------------------------+
3451 * | Buffer Address [63:0] |
3452 * +-----------------------------------------------------+
3453 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3454 * +-----------------------------------------------------+
3455 * 63 48 47 40 39 32 31 16 15 0
3456 */
3457 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3458
3459 if (!netif_msg_rx_status(adapter))
3460 goto exit;
3461
3462 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3463 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3464 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3465 struct my_u { __le64 a; __le64 b; };
3466 struct my_u *u = (struct my_u *)rx_desc;
3467 const char *type;
3468
3469 if (i == rx_ring->next_to_use)
3470 type = "NTU";
3471 else if (i == rx_ring->next_to_clean)
3472 type = "NTC";
3473 else
3474 type = "";
3475
3476 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3477 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3478 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3479 } /* for */
3480
3481 /* dump the descriptor caches */
3482 /* rx */
3483 pr_info("Rx descriptor cache in 64bit format\n");
3484 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3485 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3486 i,
3487 readl(adapter->hw.hw_addr + i+4),
3488 readl(adapter->hw.hw_addr + i),
3489 readl(adapter->hw.hw_addr + i+12),
3490 readl(adapter->hw.hw_addr + i+8));
3491 }
3492 /* tx */
3493 pr_info("Tx descriptor cache in 64bit format\n");
3494 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3495 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3496 i,
3497 readl(adapter->hw.hw_addr + i+4),
3498 readl(adapter->hw.hw_addr + i),
3499 readl(adapter->hw.hw_addr + i+12),
3500 readl(adapter->hw.hw_addr + i+8));
3501 }
3502exit:
3503 return;
3504}
3505
3506/**
3507 * e1000_tx_timeout - Respond to a Tx Hang
3508 * @netdev: network interface device structure
3509 **/
3510static void e1000_tx_timeout(struct net_device *netdev)
3511{
3512 struct e1000_adapter *adapter = netdev_priv(netdev);
3513
3514 /* Do the reset outside of interrupt context */
3515 adapter->tx_timeout_count++;
3516 schedule_work(&adapter->reset_task);
3517}
3518
3519static void e1000_reset_task(struct work_struct *work)
3520{
3521 struct e1000_adapter *adapter =
3522 container_of(work, struct e1000_adapter, reset_task);
3523
3524 e_err(drv, "Reset adapter\n");
3525 e1000_reinit_locked(adapter);
3526}
3527
3528/**
3529 * e1000_get_stats - Get System Network Statistics
3530 * @netdev: network interface device structure
3531 *
3532 * Returns the address of the device statistics structure.
3533 * The statistics are actually updated from the watchdog.
3534 **/
3535static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3536{
3537 /* only return the current stats */
3538 return &netdev->stats;
3539}
3540
3541/**
3542 * e1000_change_mtu - Change the Maximum Transfer Unit
3543 * @netdev: network interface device structure
3544 * @new_mtu: new value for maximum frame size
3545 *
3546 * Returns 0 on success, negative on failure
3547 **/
3548static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3549{
3550 struct e1000_adapter *adapter = netdev_priv(netdev);
3551 struct e1000_hw *hw = &adapter->hw;
3552 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3553
3554 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3555 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3556 e_err(probe, "Invalid MTU setting\n");
3557 return -EINVAL;
3558 }
3559
3560 /* Adapter-specific max frame size limits. */
3561 switch (hw->mac_type) {
3562 case e1000_undefined ... e1000_82542_rev2_1:
3563 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3564 e_err(probe, "Jumbo Frames not supported.\n");
3565 return -EINVAL;
3566 }
3567 break;
3568 default:
3569 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3570 break;
3571 }
3572
3573 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3574 msleep(1);
3575 /* e1000_down has a dependency on max_frame_size */
3576 hw->max_frame_size = max_frame;
3577 if (netif_running(netdev)) {
3578 /* prevent buffers from being reallocated */
3579 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3580 e1000_down(adapter);
3581 }
3582
	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more; this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * However, with the new *_jumbo_rx* routines, jumbo receives will
	 * use fragmented skbs.
	 */
3590
3591 if (max_frame <= E1000_RXBUFFER_2048)
3592 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3593 else
3594#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3595 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3596#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3597 adapter->rx_buffer_len = PAGE_SIZE;
3598#endif
3599
3600 /* adjust allocation if LPE protects us, and we aren't using SBP */
3601 if (!hw->tbi_compatibility_on &&
3602 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3603 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3604 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3605
3606 pr_info("%s changing MTU from %d to %d\n",
3607 netdev->name, netdev->mtu, new_mtu);
3608 netdev->mtu = new_mtu;
3609
3610 if (netif_running(netdev))
3611 e1000_up(adapter);
3612 else
3613 e1000_reset(adapter);
3614
3615 clear_bit(__E1000_RESETTING, &adapter->flags);
3616
3617 return 0;
3618}
3619
3620/**
3621 * e1000_update_stats - Update the board statistics counters
3622 * @adapter: board private structure
3623 **/
3624void e1000_update_stats(struct e1000_adapter *adapter)
3625{
3626 struct net_device *netdev = adapter->netdev;
3627 struct e1000_hw *hw = &adapter->hw;
3628 struct pci_dev *pdev = adapter->pdev;
3629 unsigned long flags;
3630 u16 phy_tmp;
3631
3632#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3633
3634 /* Prevent stats update while adapter is being reset, or if the pci
3635 * connection is down.
3636 */
3637 if (adapter->link_speed == 0)
3638 return;
3639 if (pci_channel_offline(pdev))
3640 return;
3641
3642 spin_lock_irqsave(&adapter->stats_lock, flags);
3643
3644 /* these counters are modified from e1000_tbi_adjust_stats,
3645 * called from the interrupt context, so they must only
3646 * be written while holding adapter->stats_lock
3647 */
3648
3649 adapter->stats.crcerrs += er32(CRCERRS);
3650 adapter->stats.gprc += er32(GPRC);
3651 adapter->stats.gorcl += er32(GORCL);
3652 adapter->stats.gorch += er32(GORCH);
3653 adapter->stats.bprc += er32(BPRC);
3654 adapter->stats.mprc += er32(MPRC);
3655 adapter->stats.roc += er32(ROC);
3656
3657 adapter->stats.prc64 += er32(PRC64);
3658 adapter->stats.prc127 += er32(PRC127);
3659 adapter->stats.prc255 += er32(PRC255);
3660 adapter->stats.prc511 += er32(PRC511);
3661 adapter->stats.prc1023 += er32(PRC1023);
3662 adapter->stats.prc1522 += er32(PRC1522);
3663
3664 adapter->stats.symerrs += er32(SYMERRS);
3665 adapter->stats.mpc += er32(MPC);
3666 adapter->stats.scc += er32(SCC);
3667 adapter->stats.ecol += er32(ECOL);
3668 adapter->stats.mcc += er32(MCC);
3669 adapter->stats.latecol += er32(LATECOL);
3670 adapter->stats.dc += er32(DC);
3671 adapter->stats.sec += er32(SEC);
3672 adapter->stats.rlec += er32(RLEC);
3673 adapter->stats.xonrxc += er32(XONRXC);
3674 adapter->stats.xontxc += er32(XONTXC);
3675 adapter->stats.xoffrxc += er32(XOFFRXC);
3676 adapter->stats.xofftxc += er32(XOFFTXC);
3677 adapter->stats.fcruc += er32(FCRUC);
3678 adapter->stats.gptc += er32(GPTC);
3679 adapter->stats.gotcl += er32(GOTCL);
3680 adapter->stats.gotch += er32(GOTCH);
3681 adapter->stats.rnbc += er32(RNBC);
3682 adapter->stats.ruc += er32(RUC);
3683 adapter->stats.rfc += er32(RFC);
3684 adapter->stats.rjc += er32(RJC);
3685 adapter->stats.torl += er32(TORL);
3686 adapter->stats.torh += er32(TORH);
3687 adapter->stats.totl += er32(TOTL);
3688 adapter->stats.toth += er32(TOTH);
3689 adapter->stats.tpr += er32(TPR);
3690
3691 adapter->stats.ptc64 += er32(PTC64);
3692 adapter->stats.ptc127 += er32(PTC127);
3693 adapter->stats.ptc255 += er32(PTC255);
3694 adapter->stats.ptc511 += er32(PTC511);
3695 adapter->stats.ptc1023 += er32(PTC1023);
3696 adapter->stats.ptc1522 += er32(PTC1522);
3697
3698 adapter->stats.mptc += er32(MPTC);
3699 adapter->stats.bptc += er32(BPTC);
3700
3701 /* used for adaptive IFS */
3702
3703 hw->tx_packet_delta = er32(TPT);
3704 adapter->stats.tpt += hw->tx_packet_delta;
3705 hw->collision_delta = er32(COLC);
3706 adapter->stats.colc += hw->collision_delta;
3707
3708 if (hw->mac_type >= e1000_82543) {
3709 adapter->stats.algnerrc += er32(ALGNERRC);
3710 adapter->stats.rxerrc += er32(RXERRC);
3711 adapter->stats.tncrs += er32(TNCRS);
3712 adapter->stats.cexterr += er32(CEXTERR);
3713 adapter->stats.tsctc += er32(TSCTC);
3714 adapter->stats.tsctfc += er32(TSCTFC);
3715 }
3716
3717 /* Fill out the OS statistics structure */
3718 netdev->stats.multicast = adapter->stats.mprc;
3719 netdev->stats.collisions = adapter->stats.colc;
3720
3721 /* Rx Errors */
3722
3723 /* RLEC on some newer hardware can be incorrect so build
3724 * our own version based on RUC and ROC
3725 */
3726 netdev->stats.rx_errors = adapter->stats.rxerrc +
3727 adapter->stats.crcerrs + adapter->stats.algnerrc +
3728 adapter->stats.ruc + adapter->stats.roc +
3729 adapter->stats.cexterr;
3730 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3731 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3732 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3733 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3734 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3735
3736 /* Tx Errors */
3737 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3738 netdev->stats.tx_errors = adapter->stats.txerrc;
3739 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3740 netdev->stats.tx_window_errors = adapter->stats.latecol;
3741 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3742 if (hw->bad_tx_carr_stats_fd &&
3743 adapter->link_duplex == FULL_DUPLEX) {
3744 netdev->stats.tx_carrier_errors = 0;
3745 adapter->stats.tncrs = 0;
3746 }
3747
3748 /* Tx Dropped needs to be maintained elsewhere */
3749
3750 /* Phy Stats */
3751 if (hw->media_type == e1000_media_type_copper) {
3752 if ((adapter->link_speed == SPEED_1000) &&
3753 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3754 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3755 adapter->phy_stats.idle_errors += phy_tmp;
3756 }
3757
3758 if ((hw->mac_type <= e1000_82546) &&
3759 (hw->phy_type == e1000_phy_m88) &&
3760 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3761 adapter->phy_stats.receive_errors += phy_tmp;
3762 }
3763
3764 /* Management Stats */
3765 if (hw->has_smbus) {
3766 adapter->stats.mgptc += er32(MGTPTC);
3767 adapter->stats.mgprc += er32(MGTPRC);
3768 adapter->stats.mgpdc += er32(MGTPDC);
3769 }
3770
3771 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3772}
3773
3774/**
3775 * e1000_intr - Interrupt Handler
3776 * @irq: interrupt number
3777 * @data: pointer to a network interface device structure
3778 **/
3779static irqreturn_t e1000_intr(int irq, void *data)
3780{
3781 struct net_device *netdev = data;
3782 struct e1000_adapter *adapter = netdev_priv(netdev);
3783 struct e1000_hw *hw = &adapter->hw;
3784 u32 icr = er32(ICR);
3785
	if (unlikely(!icr))
3787 return IRQ_NONE; /* Not our interrupt */
3788
	/* We might have caused the interrupt, but the above
	 * read cleared it. Just in case the driver is
	 * down, there is nothing to do, so return handled.
	 */
3793 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3794 return IRQ_HANDLED;
3795
3796 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3797 hw->get_link_status = 1;
3798 /* guard against interrupt when we're going down */
3799 if (!test_bit(__E1000_DOWN, &adapter->flags))
3800 schedule_delayed_work(&adapter->watchdog_task, 1);
3801 }
3802
3803 /* disable interrupts, without the synchronize_irq bit */
3804 ew32(IMC, ~0);
3805 E1000_WRITE_FLUSH();
3806
3807 if (likely(napi_schedule_prep(&adapter->napi))) {
3808 adapter->total_tx_bytes = 0;
3809 adapter->total_tx_packets = 0;
3810 adapter->total_rx_bytes = 0;
3811 adapter->total_rx_packets = 0;
3812 __napi_schedule(&adapter->napi);
3813 } else {
		/* This really should not happen! If it does, it is basically
		 * a bug, but not a hard error, so enable ints and continue.
		 */
3817 if (!test_bit(__E1000_DOWN, &adapter->flags))
3818 e1000_irq_enable(adapter);
3819 }
3820
3821 return IRQ_HANDLED;
3822}
3823
3824/**
3825 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing our adapter via container_of
 * @budget: amount of work we are allowed to do this pass, in packets
3827 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete_done(napi, work_done);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
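
/* Worked example of the budget accounting above (illustrative): with the
 * default NAPI budget of 64, suppose one poll cleans 10 Rx packets but
 * e1000_clean_tx_irq() returns false because a full ring's worth of Tx
 * descriptors was still pending. work_done is then forced up to the whole
 * budget (64), so the core keeps this NAPI instance scheduled and polls
 * again instead of re-enabling interrupts, giving Tx cleanup another pass.
 */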

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;

		dma_rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
	 * which will reuse the cleaned buffers.
	 */
	smp_store_release(&tx_ring->next_to_clean, i);

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}

	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
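
/* Worked example of the wake check above (illustrative; E1000_DESC_UNUSED
 * is defined in e1000.h and computes, roughly,
 * (next_to_clean > next_to_use ? 0 : count) + next_to_clean - next_to_use - 1):
 * with count = 256, next_to_clean = 10 and next_to_use = 250 the ring has
 * 256 + 10 - 250 - 1 = 15 free descriptors, which is below
 * TX_WAKE_THRESHOLD (32), so a stopped queue stays stopped; only once
 * cleaning advances far enough does netif_wake_queue() run. The smp_mb()
 * pairs with the queue-stop check in e1000_xmit_frame() so a concurrent
 * stop cannot miss the updated next_to_clean.
 */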

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @csum: receive descriptor csum field
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
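
/* Note on the status_err packing (illustrative): callers below build the
 * second argument as (u32)status | ((u32)errors << 24), matching the
 * unpacking at the top of e1000_rx_checksum(). For example, a descriptor
 * with status 0x03 (DD | EOP) and errors 0x20 yields
 * status_err = 0x20000003: (u16)status_err recovers 0x0003 and
 * status_err >> 24 recovers 0x20.
 */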

/**
 * e1000_consume_page - helper function for jumbo Rx path
 * @bi: software descriptor shadow data
 * @skb: skb being modified
 * @length: length of data being added
 **/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->rxbuf.page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_tbi_adjust_stats
 * @hw: Struct containing variables accessed by shared code
 * @stats: struct containing statistics counters to be adjusted
 * @frame_len: The length of the frame in question
 * @mac_addr: The Ethernet destination address of the frame in question
 *
 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
 */
static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
				   struct e1000_hw_stats *stats,
				   u32 frame_len, const u8 *mac_addr)
{
	u64 carry_bit;

	/* First adjust the frame length. */
	frame_len--;
	/* We need to adjust the statistics counters, since the hardware
	 * counters overcount this packet as a CRC error and undercount
	 * the packet as a good packet
	 */
	/* This packet should not be counted as a CRC error. */
	stats->crcerrs--;
	/* This packet does count as a Good Packet Received. */
	stats->gprc++;

	/* Adjust the Good Octets received counters */
	carry_bit = 0x80000000 & stats->gorcl;
	stats->gorcl += frame_len;
	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
	 * Received Count) was one before the addition,
	 * AND it is zero after, then we lost the carry out,
	 * need to add one to Gorch (Good Octets Received Count High).
	 * This could be simplified if all environments supported
	 * 64-bit integers.
	 */
	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
		stats->gorch++;
	/* Is this a broadcast or multicast? Check broadcast first,
	 * since the test for a multicast frame will test positive on
	 * a broadcast frame.
	 */
	if (is_broadcast_ether_addr(mac_addr))
		stats->bprc++;
	else if (is_multicast_ether_addr(mac_addr))
		stats->mprc++;

	if (frame_len == hw->max_frame_size) {
		/* In this case, the hardware has overcounted the number of
		 * oversize frames.
		 */
		if (stats->roc > 0)
			stats->roc--;
	}

	/* Adjust the bin counters when the extra byte put the frame in the
	 * wrong bin. Remember that the frame_len was adjusted above.
	 */
	if (frame_len == 64) {
		stats->prc64++;
		stats->prc127--;
	} else if (frame_len == 127) {
		stats->prc127++;
		stats->prc255--;
	} else if (frame_len == 255) {
		stats->prc255++;
		stats->prc511--;
	} else if (frame_len == 511) {
		stats->prc511++;
		stats->prc1023--;
	} else if (frame_len == 1023) {
		stats->prc1023++;
		stats->prc1522--;
	} else if (frame_len == 1522) {
		stats->prc1522++;
	}
}
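
/* Worked example of the carry tracking above (illustrative): suppose
 * gorcl = 0xffffff00 before the update and frame_len = 0x200. carry_bit
 * latches the set high bit (0x80000000); after the addition
 * gorcl = 0x00000100, whose high bit is now clear, so the 32-bit add
 * wrapped and gorch is incremented by one to preserve the full 64-bit
 * Good Octets Received count.
 */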

static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
				    u8 status, u8 errors,
				    u32 length, const u8 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	u8 last_byte = *(data + length - 1);

	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
		unsigned long irq_flags;

		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);

		return true;
	}

	return false;
}

static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
					  unsigned int bufsz)
{
	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);

	if (unlikely(!skb))
		adapter->alloc_rx_buff_failed++;
	return skb;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * The return value indicates whether actual cleaning was done; there is
 * no guarantee that everything was cleaned.
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
			     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped = page_address(buffer_info->rxbuf.page);

			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, mapped)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				/* an error means any chain goes out the
				 * window too
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = napi_get_frags(&adapter->napi);
				if (!rxtop)
					break;

				skb_fill_page_desc(rxtop, 0,
						   buffer_info->rxbuf.page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->rxbuf.page, 0, length);
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->rxbuf.page, 0, length);
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				struct page *p;

				/* No chain and got EOP, so this buffer is the
				 * whole packet; use copybreak to save the
				 * put_page/alloc_page for small frames.
				 */
				p = buffer_info->rxbuf.page;
				if (length <= copybreak) {
					u8 *vaddr;

					if (likely(!(netdev->features & NETIF_F_RXFCS)))
						length -= 4;
					skb = e1000_alloc_rx_skb(adapter,
								 length);
					if (!skb)
						break;

					vaddr = kmap_atomic(p);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->rxbuf.page
					 */
					skb_put(skb, length);
					e1000_rx_checksum(adapter,
							  status | rx_desc->errors << 24,
							  le16_to_cpu(rx_desc->csum), skb);

					total_rx_bytes += skb->len;
					total_rx_packets++;

					e1000_receive_skb(adapter, status,
							  rx_desc->special, skb);
					goto next_desc;
				} else {
					skb = napi_get_frags(&adapter->napi);
					if (!skb) {
						adapter->alloc_rx_buff_failed++;
						break;
					}
					skb_fill_page_desc(skb, 0, p, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		if (status & E1000_RXD_STAT_VP) {
			__le16 vlan = rx_desc->special;
			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		napi_gro_frags(&adapter->napi);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
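
/* Illustrative walk-through of the chaining above: a 9018-byte jumbo frame
 * received into 4096-byte page buffers arrives as three descriptors. The
 * first (no EOP) starts rx_skb_top via napi_get_frags() and attaches its
 * page as frag 0; the second (no EOP) appends its page as the next frag;
 * the third carries EOP, appends the final page, and the assembled
 * frag-based skb is handed to napi_gro_frags(). Any mid-chain error
 * descriptor drops the whole partially built chain, since the fragments
 * are useless on their own.
 */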

/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
				       struct e1000_rx_buffer *buffer_info,
				       u32 length, const void *data)
{
	struct sk_buff *skb;

	if (length > copybreak)
		return NULL;

	skb = e1000_alloc_rx_skb(adapter, length);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
				length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, length), data, length);

	return skb;
}
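
/* Design note (illustrative): when a frame is no longer than copybreak
 * (a module parameter, 256 bytes by default in this driver), the payload
 * is memcpy'd into a small freshly allocated skb and the original receive
 * buffer stays mapped for immediate reuse, trading one small copy for an
 * unmap/realloc cycle. Callers treat a NULL return as "not copied": the
 * frame is larger than copybreak (or the skb allocation failed) and the
 * buffer itself must be handed up instead, as e1000_clean_rx_irq() does
 * with build_skb() below.
 */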

/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			unsigned int frag_len = e1000_frag_len(adapter);

			skb = build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->rxbuf.data = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		/* !EOP means multiple descriptors were used to store a single
		 * packet, and if that's the case we need to toss it. In fact,
		 * we need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment.
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
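
/* Note on the build_skb() path above (illustrative): rxbuf.data points
 * E1000_HEADROOM bytes into the page fragment handed to the hardware, so
 * build_skb(data - E1000_HEADROOM, ...) recovers the true start of the
 * buffer and skb_reserve() then re-creates the headroom for the stack.
 * e1000_frag_len() (defined earlier in this file) sizes the fragment so
 * that the payload plus the struct skb_shared_info that build_skb()
 * places at the end both fit.
 */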

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
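
/* Worked example of the tail update above (illustrative): next_to_use is
 * the first slot *not* yet given a buffer, while the RDT register must
 * point at the last valid descriptor, hence the post-decrement. If the
 * loop leaves i == 5, RDT is written with 4; if the loop wrapped and left
 * i == 0, the i-- == 0 test fires and RDT is written with count - 1. The
 * same idiom is used by e1000_alloc_rx_buffers() below.
 */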

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;

			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
			      bufsz, data);
			/* Try again, without freeing the previous */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}

		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
			      adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}

/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
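
/* Illustrative timeline of the state machine above: adapter->smartspeed
 * counts successive calls (made from the link watchdog). At 0, two
 * consecutive Master/Slave config faults cause the driver to clear manual
 * master/slave selection and restart autonegotiation; once the counter
 * reaches E1000_SMARTSPEED_DOWNSHIFT (defined in e1000.h) without link,
 * manual master mode is re-enabled as a last attempt for marginal
 * 2/3-pair cabling; at E1000_SMARTSPEED_MAX the counter wraps to 0 and
 * the whole sequence may begin again.
 */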

/**
 * e1000_ioctl - handle ioctl calls
 * @netdev: pointer to the network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_mii_ioctl - handle MII ioctl calls
 * @netdev: pointer to the network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;

					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(adapter, speed,
								    ((mii_reg & 0x100)
								     ? DUPLEX_FULL :
								     DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}

void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}

static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
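
/* Worked example of the VLAN filter table math above (illustrative): the
 * VFTA is an array of 128 32-bit registers forming a 4096-bit bitmap, one
 * bit per VLAN ID. For vid = 100, index = (100 >> 5) & 0x7F = 3 and
 * 100 & 0x1F = 4, so bit 4 of VFTA[3] is set; e1000_vlan_rx_kill_vid()
 * below clears the same bit to remove the filter.
 */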

static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
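
/* Worked example of why the spd + dplx switch above is unambiguous
 * (illustrative): SPEED_10/100/1000 are the decimal values 10, 100 and
 * 1000, all with a clear least-significant bit, and DUPLEX_HALF/FULL are
 * 0 and 1, so the guard (spd & 1) || (dplx & ~1) makes every valid sum
 * unique: 100 + DUPLEX_FULL = 101 can only mean 100 Mbps full duplex and
 * can never collide with another speed/duplex pair.
 */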

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */