// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int __maybe_unused e1000_suspend(struct device *dev);
static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

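/* Received frames no longer than copybreak bytes are copied into a freshly
 * allocated skb so the original (larger) DMA buffer can be recycled right
 * away; setting it to 0 disables the copy entirely.
 */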
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
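/* the default of -1 is out of range, which makes netif_msg_init() fall
 * back to DEFAULT_MSG_ENABLE
 */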
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;

	pr_info("%s\n", e1000_driver_string);
	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for packets <= %u bytes\n",
				copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err)
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

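/* e1000_update_mng_vlan - keep the manageability VLAN registered
 *
 * Makes sure the VLAN id carried in the management (DHCP) cookie stays in
 * the active VLAN filter while VLAN tagging is in use, and drops the old
 * id once nothing else references it.
 */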
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];

		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Since the watchdog task can reschedule other tasks, we should
	 * cancel it first, otherwise we can run into the situation when a
	 * work is still running after the adapter has been turned down.
	 */
	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);
	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently with us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result in transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition the PBA for MTUs greater than 9k.
	 * CTRL.RST must be asserted for the new partitioning to take effect.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

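		/* the 82547 splits a single 40 KB packet buffer between the
		 * Rx allocation (pba, in KB) and the Tx FIFO; remember the
		 * Tx portion in bytes for the FIFO-stall workaround
		 */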
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but doesn't include the ethernet FCS because
		 * hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
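	/* worked example: with a 48 KB Rx PBA and a 1522 byte max frame,
	 * hwm = min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630)
	 *     = 44236, which the 8-byte mask below rounds down to 44232
	 */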
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);

		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

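	/* a valid image's 16-bit words, up to and including the checksum
	 * word at EEPROM_CHECKSUM_REG, sum to EEPROM_SUM; the expected
	 * checksum is therefore EEPROM_SUM minus the sum of the preceding
	 * words
	 */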
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */
	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits 64-bit DMA
	 * addresses to 64-bit-capable hardware; some 32-bit adapters hang
	 * on Tx when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set the MAC address to all zeroes to invalidate it and
		 * temporarily disable this device for the user. This blocks
		 * regular traffic while still permitting ethtool ioctls to
		 * reach the hardware, and allows the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");

	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
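	/* begin and end - 1 lie in the same 64 kB region exactly when their
	 * addresses agree above bit 15, i.e. when (begin ^ (end - 1)) >> 16
	 * is zero; e.g. begin = 0x1fff0, end = 0x20010 fails the check
	 * because 0x1 ^ 0x2 != 0
	 */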
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) == 0;
	}

	return true;
}

/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;

		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}

/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 *				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
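	/* TIPG packs all three inter-packet gaps into one register: IPGT in
	 * the low bits, with IPGR1 and IPGR2 folded in above it at the shift
	 * positions given by the hardware definitions
	 */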
1639	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1640	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1641	ew32(TIPG, tipg);
1642
1643	/* Set the Tx Interrupt Delay register */
1644
1645	ew32(TIDV, adapter->tx_int_delay);
1646	if (hw->mac_type >= e1000_82540)
1647		ew32(TADV, adapter->tx_abs_int_delay);
1648
1649	/* Program the Transmit Control Register */
1650
1651	tctl = er32(TCTL);
1652	tctl &= ~E1000_TCTL_CT;
1653	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1654		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1655
1656	e1000_config_collision_dist(hw);
1657
1658	/* Setup Transmit Descriptor Settings for eop descriptor */
1659	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1660
1661	/* only set IDE if we are delaying interrupts using the timers */
1662	if (adapter->tx_int_delay)
1663		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1664
1665	if (hw->mac_type < e1000_82543)
1666		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1667	else
1668		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1669
1670	/* Cache if we're 82544 running in PCI-X because we'll
1671	 * need this to apply a workaround later in the send path.
1672	 */
1673	if (hw->mac_type == e1000_82544 &&
1674	    hw->bus_type == e1000_bus_type_pcix)
1675		adapter->pcix_82544 = true;
1676
1677	ew32(TCTL, tctl);
1678
1679}
1680
1681/**
1682 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1683 * @adapter: board private structure
1684 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1685 *
1686 * Returns 0 on success, negative on failure
1687 **/
1688static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1689				    struct e1000_rx_ring *rxdr)
1690{
1691	struct pci_dev *pdev = adapter->pdev;
1692	int size, desc_len;
1693
1694	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1695	rxdr->buffer_info = vzalloc(size);
1696	if (!rxdr->buffer_info)
1697		return -ENOMEM;
1698
1699	desc_len = sizeof(struct e1000_rx_desc);
1700
1701	/* Round up to nearest 4K */
1702
1703	rxdr->size = rxdr->count * desc_len;
1704	rxdr->size = ALIGN(rxdr->size, 4096);
1705
1706	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1707					GFP_KERNEL);
1708	if (!rxdr->desc) {
1709setup_rx_desc_die:
1710		vfree(rxdr->buffer_info);
1711		return -ENOMEM;
1712	}
1713
1714	/* Fix for errata 23, can't cross 64kB boundary */
1715	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1716		void *olddesc = rxdr->desc;
1717		dma_addr_t olddma = rxdr->dma;
1718		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1719		      rxdr->size, rxdr->desc);
1720		/* Try again, without freeing the previous */
1721		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1722						&rxdr->dma, GFP_KERNEL);
1723		/* Failed allocation, critical failure */
1724		if (!rxdr->desc) {
1725			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1726					  olddma);
1727			goto setup_rx_desc_die;
1728		}
1729
1730		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731			/* give up */
1732			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1733					  rxdr->dma);
1734			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735					  olddma);
1736			e_err(probe, "Unable to allocate aligned memory for "
1737			      "the Rx descriptor ring\n");
1738			goto setup_rx_desc_die;
1739		} else {
1740			/* Free old allocation, new allocation was successful */
1741			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742					  olddma);
1743		}
1744	}
1745	memset(rxdr->desc, 0, rxdr->size);
1746
1747	rxdr->next_to_clean = 0;
1748	rxdr->next_to_use = 0;
1749	rxdr->rx_skb_top = NULL;
1750
1751	return 0;
1752}
1753
1754/**
1755 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1756 * 				  (Descriptors) for all queues
1757 * @adapter: board private structure
1758 *
1759 * Return 0 on success, negative on failure
1760 **/
1761int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762{
1763	int i, err = 0;
1764
1765	for (i = 0; i < adapter->num_rx_queues; i++) {
1766		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767		if (err) {
1768			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769			for (i--; i >= 0; i--)
1770				e1000_free_rx_resources(adapter,
1771							&adapter->rx_ring[i]);
1772			break;
1773		}
1774	}
1775
1776	return err;
1777}
1778
1779/**
1780 * e1000_setup_rctl - configure the receive control registers
1781 * @adapter: board private structure
1782 **/
1783static void e1000_setup_rctl(struct e1000_adapter *adapter)
1784{
1785	struct e1000_hw *hw = &adapter->hw;
1786	u32 rctl;
1787
1788	rctl = er32(RCTL);
1789
1790	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1791
1792	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1793		E1000_RCTL_RDMTS_HALF |
1794		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1795
1796	if (hw->tbi_compatibility_on == 1)
1797		rctl |= E1000_RCTL_SBP;
1798	else
1799		rctl &= ~E1000_RCTL_SBP;
1800
1801	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1802		rctl &= ~E1000_RCTL_LPE;
1803	else
1804		rctl |= E1000_RCTL_LPE;
1805
1806	/* Setup buffer sizes */
1807	rctl &= ~E1000_RCTL_SZ_4096;
1808	rctl |= E1000_RCTL_BSEX;
1809	switch (adapter->rx_buffer_len) {
1810	case E1000_RXBUFFER_2048:
1811	default:
1812		rctl |= E1000_RCTL_SZ_2048;
1813		rctl &= ~E1000_RCTL_BSEX;
1814		break;
1815	case E1000_RXBUFFER_4096:
1816		rctl |= E1000_RCTL_SZ_4096;
1817		break;
1818	case E1000_RXBUFFER_8192:
1819		rctl |= E1000_RCTL_SZ_8192;
1820		break;
1821	case E1000_RXBUFFER_16384:
1822		rctl |= E1000_RCTL_SZ_16384;
1823		break;
1824	}
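
	/* Note (per the 8254x RCTL layout): with BSEX set the hardware
	 * scales the base BSIZE encoding by 16, which is why the 2048 case
	 * (a base size) must clear BSEX while the larger sizes keep it set;
	 * e.g. the 4096 case reuses the 256-byte base encoding scaled by 16.
	 */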
1825
1826	/* This is useful for sniffing bad packets. */
1827	if (adapter->netdev->features & NETIF_F_RXALL) {
1828		/* UPE and MPE will be handled by normal PROMISC logic
1829		 * in e1000_set_rx_mode
1830		 */
1831		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1832			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1833			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1834
1835		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1836			  E1000_RCTL_DPF | /* Allow filtered pause */
1837			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1838		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1839		 * and that breaks VLANs.
1840		 */
1841	}
1842
1843	ew32(RCTL, rctl);
1844}
1845
1846/**
1847 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848 * @adapter: board private structure
1849 *
1850 * Configure the Rx unit of the MAC after a reset.
1851 **/
1852static void e1000_configure_rx(struct e1000_adapter *adapter)
1853{
1854	u64 rdba;
1855	struct e1000_hw *hw = &adapter->hw;
1856	u32 rdlen, rctl, rxcsum;
1857
1858	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859		rdlen = adapter->rx_ring[0].count *
1860			sizeof(struct e1000_rx_desc);
1861		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863	} else {
1864		rdlen = adapter->rx_ring[0].count *
1865			sizeof(struct e1000_rx_desc);
1866		adapter->clean_rx = e1000_clean_rx_irq;
1867		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868	}
1869
1870	/* disable receives while setting up the descriptors */
1871	rctl = er32(RCTL);
1872	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873
1874	/* set the Receive Delay Timer Register */
1875	ew32(RDTR, adapter->rx_int_delay);
1876
1877	if (hw->mac_type >= e1000_82540) {
1878		ew32(RADV, adapter->rx_abs_int_delay);
1879		if (adapter->itr_setting != 0)
1880			ew32(ITR, 1000000000 / (adapter->itr * 256));
1881	}
1882
1883	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1884	 * the Base and Length of the Rx Descriptor Ring
1885	 */
1886	switch (adapter->num_rx_queues) {
1887	case 1:
1888	default:
1889		rdba = adapter->rx_ring[0].dma;
1890		ew32(RDLEN, rdlen);
1891		ew32(RDBAH, (rdba >> 32));
1892		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893		ew32(RDT, 0);
1894		ew32(RDH, 0);
1895		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896					   E1000_RDH : E1000_82542_RDH);
1897		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898					   E1000_RDT : E1000_82542_RDT);
1899		break;
1900	}
1901
1902	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1903	if (hw->mac_type >= e1000_82543) {
1904		rxcsum = er32(RXCSUM);
1905		if (adapter->rx_csum)
1906			rxcsum |= E1000_RXCSUM_TUOFL;
1907		else
1908			/* don't need to clear IPPCSE as it defaults to 0 */
1909			rxcsum &= ~E1000_RXCSUM_TUOFL;
1910		ew32(RXCSUM, rxcsum);
1911	}
1912
1913	/* Enable Receives */
1914	ew32(RCTL, rctl | E1000_RCTL_EN);
1915}
1916
1917/**
1918 * e1000_free_tx_resources - Free Tx Resources per Queue
1919 * @adapter: board private structure
1920 * @tx_ring: Tx descriptor ring for a specific queue
1921 *
1922 * Free all transmit software resources
1923 **/
1924static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925				    struct e1000_tx_ring *tx_ring)
1926{
1927	struct pci_dev *pdev = adapter->pdev;
1928
1929	e1000_clean_tx_ring(adapter, tx_ring);
1930
1931	vfree(tx_ring->buffer_info);
1932	tx_ring->buffer_info = NULL;
1933
1934	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935			  tx_ring->dma);
1936
1937	tx_ring->desc = NULL;
1938}
1939
1940/**
1941 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942 * @adapter: board private structure
1943 *
1944 * Free all transmit software resources
1945 **/
1946void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947{
1948	int i;
1949
1950	for (i = 0; i < adapter->num_tx_queues; i++)
1951		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952}
1953
1954static void
1955e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956				 struct e1000_tx_buffer *buffer_info,
1957				 int budget)
1958{
1959	if (buffer_info->dma) {
1960		if (buffer_info->mapped_as_page)
1961			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1962				       buffer_info->length, DMA_TO_DEVICE);
1963		else
1964			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1965					 buffer_info->length,
1966					 DMA_TO_DEVICE);
1967		buffer_info->dma = 0;
1968	}
1969	if (buffer_info->skb) {
1970		napi_consume_skb(buffer_info->skb, budget);
1971		buffer_info->skb = NULL;
1972	}
1973	buffer_info->time_stamp = 0;
1974	/* buffer_info must be completely set up in the transmit path */
1975}
1976
1977/**
1978 * e1000_clean_tx_ring - Free Tx Buffers
1979 * @adapter: board private structure
1980 * @tx_ring: ring to be cleaned
1981 **/
1982static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983				struct e1000_tx_ring *tx_ring)
1984{
1985	struct e1000_hw *hw = &adapter->hw;
1986	struct e1000_tx_buffer *buffer_info;
1987	unsigned long size;
1988	unsigned int i;
1989
1990	/* Free all the Tx ring sk_buffs */
1991
1992	for (i = 0; i < tx_ring->count; i++) {
1993		buffer_info = &tx_ring->buffer_info[i];
1994		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1995	}
1996
1997	netdev_reset_queue(adapter->netdev);
1998	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1999	memset(tx_ring->buffer_info, 0, size);
2000
2001	/* Zero out the descriptor ring */
2002
2003	memset(tx_ring->desc, 0, tx_ring->size);
2004
2005	tx_ring->next_to_use = 0;
2006	tx_ring->next_to_clean = 0;
2007	tx_ring->last_tx_tso = false;
2008
2009	writel(0, hw->hw_addr + tx_ring->tdh);
2010	writel(0, hw->hw_addr + tx_ring->tdt);
2011}
2012
2013/**
2014 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2015 * @adapter: board private structure
2016 **/
2017static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018{
2019	int i;
2020
2021	for (i = 0; i < adapter->num_tx_queues; i++)
2022		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023}
2024
2025/**
2026 * e1000_free_rx_resources - Free Rx Resources
2027 * @adapter: board private structure
2028 * @rx_ring: ring to clean the resources from
2029 *
2030 * Free all receive software resources
2031 **/
2032static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033				    struct e1000_rx_ring *rx_ring)
2034{
2035	struct pci_dev *pdev = adapter->pdev;
2036
2037	e1000_clean_rx_ring(adapter, rx_ring);
2038
2039	vfree(rx_ring->buffer_info);
2040	rx_ring->buffer_info = NULL;
2041
2042	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043			  rx_ring->dma);
2044
2045	rx_ring->desc = NULL;
2046}
2047
2048/**
2049 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2050 * @adapter: board private structure
2051 *
2052 * Free all receive software resources
2053 **/
2054void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2055{
2056	int i;
2057
2058	for (i = 0; i < adapter->num_rx_queues; i++)
2059		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060}
2061
2062#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2063static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2064{
2065	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2066		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2067}
2068
2069static void *e1000_alloc_frag(const struct e1000_adapter *a)
2070{
2071	unsigned int len = e1000_frag_len(a);
2072	u8 *data = netdev_alloc_frag(len);
2073
2074	if (likely(data))
2075		data += E1000_HEADROOM;
2076	return data;
2077}
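
/* For illustration (typical but config-dependent values): with
 * rx_buffer_len = 2048, NET_SKB_PAD = 32 and NET_IP_ALIGN = 2, the frag
 * length is SKB_DATA_ALIGN(2082) + SKB_DATA_ALIGN(sizeof(struct
 * skb_shared_info)), roughly 2112 + 320 = 2432 bytes per receive buffer.
 */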
2078
2079/**
2080 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2081 * @adapter: board private structure
2082 * @rx_ring: ring to free buffers from
2083 **/
2084static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2085				struct e1000_rx_ring *rx_ring)
2086{
2087	struct e1000_hw *hw = &adapter->hw;
2088	struct e1000_rx_buffer *buffer_info;
2089	struct pci_dev *pdev = adapter->pdev;
2090	unsigned long size;
2091	unsigned int i;
2092
2093	/* Free all the Rx netfrags */
2094	for (i = 0; i < rx_ring->count; i++) {
2095		buffer_info = &rx_ring->buffer_info[i];
2096		if (adapter->clean_rx == e1000_clean_rx_irq) {
2097			if (buffer_info->dma)
2098				dma_unmap_single(&pdev->dev, buffer_info->dma,
2099						 adapter->rx_buffer_len,
2100						 DMA_FROM_DEVICE);
2101			if (buffer_info->rxbuf.data) {
2102				skb_free_frag(buffer_info->rxbuf.data);
2103				buffer_info->rxbuf.data = NULL;
2104			}
2105		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2106			if (buffer_info->dma)
2107				dma_unmap_page(&pdev->dev, buffer_info->dma,
2108					       adapter->rx_buffer_len,
2109					       DMA_FROM_DEVICE);
2110			if (buffer_info->rxbuf.page) {
2111				put_page(buffer_info->rxbuf.page);
2112				buffer_info->rxbuf.page = NULL;
2113			}
2114		}
2115
2116		buffer_info->dma = 0;
2117	}
2118
2119	/* there may also be some cached data from a chained receive */
2120	napi_free_frags(&adapter->napi);
2121	rx_ring->rx_skb_top = NULL;
2122
2123	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2124	memset(rx_ring->buffer_info, 0, size);
2125
2126	/* Zero out the descriptor ring */
2127	memset(rx_ring->desc, 0, rx_ring->size);
2128
2129	rx_ring->next_to_clean = 0;
2130	rx_ring->next_to_use = 0;
2131
2132	writel(0, hw->hw_addr + rx_ring->rdh);
2133	writel(0, hw->hw_addr + rx_ring->rdt);
2134}
2135
2136/**
2137 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2138 * @adapter: board private structure
2139 **/
2140static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2141{
2142	int i;
2143
2144	for (i = 0; i < adapter->num_rx_queues; i++)
2145		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146}
2147
2148/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2149 * and memory write and invalidate disabled for certain operations
2150 */
2151static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2152{
2153	struct e1000_hw *hw = &adapter->hw;
2154	struct net_device *netdev = adapter->netdev;
2155	u32 rctl;
2156
2157	e1000_pci_clear_mwi(hw);
2158
2159	rctl = er32(RCTL);
2160	rctl |= E1000_RCTL_RST;
2161	ew32(RCTL, rctl);
2162	E1000_WRITE_FLUSH();
2163	mdelay(5);
2164
2165	if (netif_running(netdev))
2166		e1000_clean_all_rx_rings(adapter);
2167}
2168
2169static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2170{
2171	struct e1000_hw *hw = &adapter->hw;
2172	struct net_device *netdev = adapter->netdev;
2173	u32 rctl;
2174
2175	rctl = er32(RCTL);
2176	rctl &= ~E1000_RCTL_RST;
2177	ew32(RCTL, rctl);
2178	E1000_WRITE_FLUSH();
2179	mdelay(5);
2180
2181	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2182		e1000_pci_set_mwi(hw);
2183
2184	if (netif_running(netdev)) {
2185		/* No need to loop, because 82542 supports only 1 queue */
2186		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2187		e1000_configure_rx(adapter);
2188		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2189	}
2190}
2191
2192/**
2193 * e1000_set_mac - Change the Ethernet Address of the NIC
2194 * @netdev: network interface device structure
2195 * @p: pointer to an address structure
2196 *
2197 * Returns 0 on success, negative on failure
2198 **/
2199static int e1000_set_mac(struct net_device *netdev, void *p)
2200{
2201	struct e1000_adapter *adapter = netdev_priv(netdev);
2202	struct e1000_hw *hw = &adapter->hw;
2203	struct sockaddr *addr = p;
2204
2205	if (!is_valid_ether_addr(addr->sa_data))
2206		return -EADDRNOTAVAIL;
2207
2208	/* 82542 2.0 needs to be in reset to write receive address registers */
2209
2210	if (hw->mac_type == e1000_82542_rev2_0)
2211		e1000_enter_82542_rst(adapter);
2212
2213	eth_hw_addr_set(netdev, addr->sa_data);
2214	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2215
2216	e1000_rar_set(hw, hw->mac_addr, 0);
2217
2218	if (hw->mac_type == e1000_82542_rev2_0)
2219		e1000_leave_82542_rst(adapter);
2220
2221	return 0;
2222}
2223
2224/**
2225 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2226 * @netdev: network interface device structure
2227 *
2228 * The set_rx_mode entry point is called whenever the unicast or multicast
2229 * address lists or the network interface flags are updated. This routine is
2230 * responsible for configuring the hardware for proper unicast, multicast,
2231 * promiscuous mode, and all-multi behavior.
2232 **/
2233static void e1000_set_rx_mode(struct net_device *netdev)
2234{
2235	struct e1000_adapter *adapter = netdev_priv(netdev);
2236	struct e1000_hw *hw = &adapter->hw;
2237	struct netdev_hw_addr *ha;
2238	bool use_uc = false;
2239	u32 rctl;
2240	u32 hash_value;
2241	int i, rar_entries = E1000_RAR_ENTRIES;
2242	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2243	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2244
2245	if (!mcarray)
2246		return;
2247
2248	/* Check for Promiscuous and All Multicast modes */
2249
2250	rctl = er32(RCTL);
2251
2252	if (netdev->flags & IFF_PROMISC) {
2253		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2254		rctl &= ~E1000_RCTL_VFE;
2255	} else {
2256		if (netdev->flags & IFF_ALLMULTI)
2257			rctl |= E1000_RCTL_MPE;
2258		else
2259			rctl &= ~E1000_RCTL_MPE;
2260		/* Enable VLAN filter if there is a VLAN */
2261		if (e1000_vlan_used(adapter))
2262			rctl |= E1000_RCTL_VFE;
2263	}
2264
2265	if (netdev_uc_count(netdev) > rar_entries - 1) {
2266		rctl |= E1000_RCTL_UPE;
2267	} else if (!(netdev->flags & IFF_PROMISC)) {
2268		rctl &= ~E1000_RCTL_UPE;
2269		use_uc = true;
2270	}
2271
2272	ew32(RCTL, rctl);
2273
2274	/* 82542 2.0 needs to be in reset to write receive address registers */
2275
2276	if (hw->mac_type == e1000_82542_rev2_0)
2277		e1000_enter_82542_rst(adapter);
2278
2279	/* load the first 14 addresses into the exact filters 1-14. Unicast
2280	 * addresses take precedence to avoid disabling unicast filtering
2281	 * when possible.
2282	 *
2283	 * RAR 0 is used for the station MAC address. If there are fewer
2284	 * than 14 addresses, go ahead and clear the remaining filters.
2285	 */
2286	i = 1;
2287	if (use_uc)
2288		netdev_for_each_uc_addr(ha, netdev) {
2289			if (i == rar_entries)
2290				break;
2291			e1000_rar_set(hw, ha->addr, i++);
2292		}
2293
2294	netdev_for_each_mc_addr(ha, netdev) {
2295		if (i == rar_entries) {
2296			/* load any remaining addresses into the hash table */
2297			u32 hash_reg, hash_bit, mta;
2298			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2299			hash_reg = (hash_value >> 5) & 0x7F;
2300			hash_bit = hash_value & 0x1F;
2301			mta = (1 << hash_bit);
2302			mcarray[hash_reg] |= mta;
2303		} else {
2304			e1000_rar_set(hw, ha->addr, i++);
2305		}
2306	}
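
	/* Worked example (illustrative value): a hash_value of 0x1234 gives
	 * hash_reg = (0x1234 >> 5) & 0x7F = 0x11 and hash_bit = 0x14, i.e.
	 * bit 20 of MTA register 17 is set for that address.
	 */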
2307
2308	for (; i < rar_entries; i++) {
2309		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2310		E1000_WRITE_FLUSH();
2311		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2312		E1000_WRITE_FLUSH();
2313	}
2314
2315	/* write the hash table completely, writing from the bottom to avoid
2316	 * both stupid write-combining chipsets and flushing each write
2317	 */
2318	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2319		/* The 82544 has an errata where writing odd
2320		 * offsets overwrites the previous even offset, but writing
2321		 * backwards over the range solves the issue by always
2322		 * writing the odd offset first
2323		 */
2324		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2325	}
2326	E1000_WRITE_FLUSH();
2327
2328	if (hw->mac_type == e1000_82542_rev2_0)
2329		e1000_leave_82542_rst(adapter);
2330
2331	kfree(mcarray);
2332}
2333
2334/**
2335 * e1000_update_phy_info_task - get phy info
2336 * @work: work struct contained inside adapter struct
2337 *
2338 * Need to wait a few seconds after link up to get diagnostic information from
2339 * the phy
2340 */
2341static void e1000_update_phy_info_task(struct work_struct *work)
2342{
2343	struct e1000_adapter *adapter = container_of(work,
2344						     struct e1000_adapter,
2345						     phy_info_task.work);
2346
2347	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2348}
2349
2350/**
2351 * e1000_82547_tx_fifo_stall_task - task to complete work
2352 * @work: work struct contained inside adapter struct
2353 **/
2354static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2355{
2356	struct e1000_adapter *adapter = container_of(work,
2357						     struct e1000_adapter,
2358						     fifo_stall_task.work);
2359	struct e1000_hw *hw = &adapter->hw;
2360	struct net_device *netdev = adapter->netdev;
2361	u32 tctl;
2362
2363	if (atomic_read(&adapter->tx_fifo_stall)) {
2364		if ((er32(TDT) == er32(TDH)) &&
2365		   (er32(TDFT) == er32(TDFH)) &&
2366		   (er32(TDFTS) == er32(TDFHS))) {
2367			tctl = er32(TCTL);
2368			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2369			ew32(TDFT, adapter->tx_head_addr);
2370			ew32(TDFH, adapter->tx_head_addr);
2371			ew32(TDFTS, adapter->tx_head_addr);
2372			ew32(TDFHS, adapter->tx_head_addr);
2373			ew32(TCTL, tctl);
2374			E1000_WRITE_FLUSH();
2375
2376			adapter->tx_fifo_head = 0;
2377			atomic_set(&adapter->tx_fifo_stall, 0);
2378			netif_wake_queue(netdev);
2379		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2380			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2381		}
2382	}
2383}
2384
2385bool e1000_has_link(struct e1000_adapter *adapter)
2386{
2387	struct e1000_hw *hw = &adapter->hw;
2388	bool link_active = false;
2389
2390	/* get_link_status is set on LSC (link status) interrupt or rx
2391	 * sequence error interrupt (except on intel ce4100).
2392	 * link_active will stay false until e1000_check_for_link
2393	 * establishes link; this applies to copper adapters
2394	 * only.
2395	 */
2396	switch (hw->media_type) {
2397	case e1000_media_type_copper:
2398		if (hw->mac_type == e1000_ce4100)
2399			hw->get_link_status = 1;
2400		if (hw->get_link_status) {
2401			e1000_check_for_link(hw);
2402			link_active = !hw->get_link_status;
2403		} else {
2404			link_active = true;
2405		}
2406		break;
2407	case e1000_media_type_fiber:
2408		e1000_check_for_link(hw);
2409		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410		break;
2411	case e1000_media_type_internal_serdes:
2412		e1000_check_for_link(hw);
2413		link_active = hw->serdes_has_link;
2414		break;
2415	default:
2416		break;
2417	}
2418
2419	return link_active;
2420}
2421
2422/**
2423 * e1000_watchdog - work function
2424 * @work: work struct contained inside adapter struct
2425 **/
2426static void e1000_watchdog(struct work_struct *work)
2427{
2428	struct e1000_adapter *adapter = container_of(work,
2429						     struct e1000_adapter,
2430						     watchdog_task.work);
2431	struct e1000_hw *hw = &adapter->hw;
2432	struct net_device *netdev = adapter->netdev;
2433	struct e1000_tx_ring *txdr = adapter->tx_ring;
2434	u32 link, tctl;
2435
2436	link = e1000_has_link(adapter);
2437	if ((netif_carrier_ok(netdev)) && link)
2438		goto link_up;
2439
2440	if (link) {
2441		if (!netif_carrier_ok(netdev)) {
2442			u32 ctrl;
2443			/* update snapshot of PHY registers on LSC */
2444			e1000_get_speed_and_duplex(hw,
2445						   &adapter->link_speed,
2446						   &adapter->link_duplex);
2447
2448			ctrl = er32(CTRL);
2449			pr_info("%s NIC Link is Up %d Mbps %s, "
2450				"Flow Control: %s\n",
2451				netdev->name,
2452				adapter->link_speed,
2453				adapter->link_duplex == FULL_DUPLEX ?
2454				"Full Duplex" : "Half Duplex",
2455				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2456				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2457				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2458				E1000_CTRL_TFCE) ? "TX" : "None")));
2459
2460			/* adjust timeout factor according to speed/duplex */
2461			adapter->tx_timeout_factor = 1;
2462			switch (adapter->link_speed) {
2463			case SPEED_10:
2464				adapter->tx_timeout_factor = 16;
2465				break;
2466			case SPEED_100:
2467				/* maybe add some timeout factor ? */
2468				break;
2469			}
2470
2471			/* enable transmits in the hardware */
2472			tctl = er32(TCTL);
2473			tctl |= E1000_TCTL_EN;
2474			ew32(TCTL, tctl);
2475
2476			netif_carrier_on(netdev);
2477			if (!test_bit(__E1000_DOWN, &adapter->flags))
2478				schedule_delayed_work(&adapter->phy_info_task,
2479						      2 * HZ);
2480			adapter->smartspeed = 0;
2481		}
2482	} else {
2483		if (netif_carrier_ok(netdev)) {
2484			adapter->link_speed = 0;
2485			adapter->link_duplex = 0;
2486			pr_info("%s NIC Link is Down\n",
2487				netdev->name);
2488			netif_carrier_off(netdev);
2489
2490			if (!test_bit(__E1000_DOWN, &adapter->flags))
2491				schedule_delayed_work(&adapter->phy_info_task,
2492						      2 * HZ);
2493		}
2494
2495		e1000_smartspeed(adapter);
2496	}
2497
2498link_up:
2499	e1000_update_stats(adapter);
2500
2501	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2502	adapter->tpt_old = adapter->stats.tpt;
2503	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2504	adapter->colc_old = adapter->stats.colc;
2505
2506	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2507	adapter->gorcl_old = adapter->stats.gorcl;
2508	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2509	adapter->gotcl_old = adapter->stats.gotcl;
2510
2511	e1000_update_adaptive(hw);
2512
2513	if (!netif_carrier_ok(netdev)) {
2514		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2515			/* We've lost link, so the controller stops DMA,
2516			 * but we've got queued Tx work that's never going
2517			 * to get done, so reset controller to flush Tx.
2518			 * (Do the reset outside of interrupt context).
2519			 */
2520			adapter->tx_timeout_count++;
2521			schedule_work(&adapter->reset_task);
2522			/* exit immediately since reset is imminent */
2523			return;
2524		}
2525	}
2526
2527	/* Simple mode for Interrupt Throttle Rate (ITR) */
2528	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2529		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2530		 * Total asymmetrical Tx or Rx gets ITR=8000;
2531		 * everyone else is between 2000-8000.
2532		 */
2533		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2534		u32 dif = (adapter->gotcl > adapter->gorcl ?
2535			    adapter->gotcl - adapter->gorcl :
2536			    adapter->gorcl - adapter->gotcl) / 10000;
2537		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2538
2539		ew32(ITR, 1000000000 / (itr * 256));
2540	}
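
	/* Worked example (illustrative numbers): gotcl = 30000 and
	 * gorcl = 10000 give goc = 4 and dif = 2, so
	 * itr = 2 * 6000 / 4 + 2000 = 5000 ints/s, and the register is
	 * written with 1000000000 / (5000 * 256) = 781 (in 256 ns units).
	 */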
2541
2542	/* Cause software interrupt to ensure rx ring is cleaned */
2543	ew32(ICS, E1000_ICS_RXDMT0);
2544
2545	/* Force detection of hung controller every watchdog period */
2546	adapter->detect_tx_hung = true;
2547
2548	/* Reschedule the task */
2549	if (!test_bit(__E1000_DOWN, &adapter->flags))
2550		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2551}
2552
2553enum latency_range {
2554	lowest_latency = 0,
2555	low_latency = 1,
2556	bulk_latency = 2,
2557	latency_invalid = 255
2558};
2559
2560/**
2561 * e1000_update_itr - update the dynamic ITR value based on statistics
2562 * @adapter: pointer to adapter
2563 * @itr_setting: current adapter->itr
2564 * @packets: the number of packets during this measurement interval
2565 * @bytes: the number of bytes during this measurement interval
2566 *
2567 *      Stores a new ITR value based on packets and byte
2568 *      counts during the last interrupt.  The advantage of per interrupt
2569 *      computation is faster updates and more accurate ITR for the current
2570 *      traffic pattern.  Constants in this function were computed
2571 *      based on theoretical maximum wire speed and thresholds were set based
2572 *      on testing data as well as attempting to minimize response time
2573 *      while increasing bulk throughput.
2574 *      This functionality is controlled by the InterruptThrottleRate module
2575 *      parameter (see e1000_param.c).
2576 **/
2577static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2578				     u16 itr_setting, int packets, int bytes)
2579{
2580	unsigned int retval = itr_setting;
2581	struct e1000_hw *hw = &adapter->hw;
2582
2583	if (unlikely(hw->mac_type < e1000_82540))
2584		goto update_itr_done;
2585
2586	if (packets == 0)
2587		goto update_itr_done;
2588
2589	switch (itr_setting) {
2590	case lowest_latency:
2591		/* jumbo frames get bulk treatment */
2592		if (bytes/packets > 8000)
2593			retval = bulk_latency;
2594		else if ((packets < 5) && (bytes > 512))
2595			retval = low_latency;
2596		break;
2597	case low_latency:  /* 50 usec aka 20000 ints/s */
2598		if (bytes > 10000) {
2599			/* jumbo frames need bulk latency setting */
2600			if (bytes/packets > 8000)
2601				retval = bulk_latency;
2602			else if ((packets < 10) || ((bytes/packets) > 1200))
2603				retval = bulk_latency;
2604			else if (packets > 35)
2605				retval = lowest_latency;
2606		} else if (bytes/packets > 2000)
2607			retval = bulk_latency;
2608		else if (packets <= 2 && bytes < 512)
2609			retval = lowest_latency;
2610		break;
2611	case bulk_latency: /* 250 usec aka 4000 ints/s */
2612		if (bytes > 25000) {
2613			if (packets > 35)
2614				retval = low_latency;
2615		} else if (bytes < 6000) {
2616			retval = low_latency;
2617		}
2618		break;
2619	}
2620
2621update_itr_done:
2622	return retval;
2623}
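
/* Worked example (illustrative numbers): in the low_latency state, an
 * interval of 20 packets and 30000 bytes has bytes > 10000 and
 * bytes/packets = 1500 > 1200, so it steps up to bulk_latency, while 50
 * packets totalling 12000 bytes (240 bytes/packet, more than 35 packets)
 * steps down to lowest_latency.
 */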
2624
2625static void e1000_set_itr(struct e1000_adapter *adapter)
2626{
2627	struct e1000_hw *hw = &adapter->hw;
2628	u16 current_itr;
2629	u32 new_itr = adapter->itr;
2630
2631	if (unlikely(hw->mac_type < e1000_82540))
2632		return;
2633
2634	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2635	if (unlikely(adapter->link_speed != SPEED_1000)) {
2636		new_itr = 4000;
2637		goto set_itr_now;
2638	}
2639
2640	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641					   adapter->total_tx_packets,
2642					   adapter->total_tx_bytes);
2643	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2644	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645		adapter->tx_itr = low_latency;
2646
2647	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648					   adapter->total_rx_packets,
2649					   adapter->total_rx_bytes);
2650	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2651	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652		adapter->rx_itr = low_latency;
2653
2654	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655
2656	switch (current_itr) {
2657	/* counts and packets in update_itr are dependent on these numbers */
2658	case lowest_latency:
2659		new_itr = 70000;
2660		break;
2661	case low_latency:
2662		new_itr = 20000; /* aka hwitr = ~200 */
2663		break;
2664	case bulk_latency:
2665		new_itr = 4000;
2666		break;
2667	default:
2668		break;
2669	}
2670
2671set_itr_now:
2672	if (new_itr != adapter->itr) {
2673		/* this attempts to bias the interrupt rate towards Bulk
2674		 * by adding intermediate steps when interrupt rate is
2675		 * increasing
2676		 */
2677		new_itr = new_itr > adapter->itr ?
2678			  min(adapter->itr + (new_itr >> 2), new_itr) :
2679			  new_itr;
2680		adapter->itr = new_itr;
2681		ew32(ITR, 1000000000 / (new_itr * 256));
2682	}
2683}
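
/* For illustration: new_itr is in interrupts/sec while the ITR register
 * counts 256 ns units, so new_itr = 20000 is written as
 * 1000000000 / (20000 * 256) = 195, i.e. roughly a 50 usec minimum gap
 * between interrupts.
 */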
2684
2685#define E1000_TX_FLAGS_CSUM		0x00000001
2686#define E1000_TX_FLAGS_VLAN		0x00000002
2687#define E1000_TX_FLAGS_TSO		0x00000004
2688#define E1000_TX_FLAGS_IPV4		0x00000008
2689#define E1000_TX_FLAGS_NO_FCS		0x00000010
2690#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2691#define E1000_TX_FLAGS_VLAN_SHIFT	16
2692
2693static int e1000_tso(struct e1000_adapter *adapter,
2694		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2695		     __be16 protocol)
2696{
2697	struct e1000_context_desc *context_desc;
2698	struct e1000_tx_buffer *buffer_info;
2699	unsigned int i;
2700	u32 cmd_length = 0;
2701	u16 ipcse = 0, tucse, mss;
2702	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2703
2704	if (skb_is_gso(skb)) {
2705		int err;
2706
2707		err = skb_cow_head(skb, 0);
2708		if (err < 0)
2709			return err;
2710
2711		hdr_len = skb_tcp_all_headers(skb);
2712		mss = skb_shinfo(skb)->gso_size;
2713		if (protocol == htons(ETH_P_IP)) {
2714			struct iphdr *iph = ip_hdr(skb);
2715			iph->tot_len = 0;
2716			iph->check = 0;
2717			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2718								 iph->daddr, 0,
2719								 IPPROTO_TCP,
2720								 0);
2721			cmd_length = E1000_TXD_CMD_IP;
2722			ipcse = skb_transport_offset(skb) - 1;
2723		} else if (skb_is_gso_v6(skb)) {
2724			tcp_v6_gso_csum_prep(skb);
2725			ipcse = 0;
2726		}
2727		ipcss = skb_network_offset(skb);
2728		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2729		tucss = skb_transport_offset(skb);
2730		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2731		tucse = 0;
2732
2733		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2734			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2735
2736		i = tx_ring->next_to_use;
2737		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2738		buffer_info = &tx_ring->buffer_info[i];
2739
2740		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2741		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2742		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2743		context_desc->upper_setup.tcp_fields.tucss = tucss;
2744		context_desc->upper_setup.tcp_fields.tucso = tucso;
2745		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2746		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2747		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2748		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2749
2750		buffer_info->time_stamp = jiffies;
2751		buffer_info->next_to_watch = i;
2752
2753		if (++i == tx_ring->count)
2754			i = 0;
2755
2756		tx_ring->next_to_use = i;
2757
2758		return true;
2759	}
2760	return false;
2761}
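
/* For illustration (numbers assumed): a TSO skb with 66 bytes of
 * Ethernet/IP/TCP headers and skb->len = 4466 programs hdr_len = 66 and
 * a PAYLEN of 4466 - 66 = 4400 into cmd_and_length, alongside the
 * DEXT/TSE/TCP command bits set above.
 */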
2762
2763static bool e1000_tx_csum(struct e1000_adapter *adapter,
2764			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2765			  __be16 protocol)
2766{
2767	struct e1000_context_desc *context_desc;
2768	struct e1000_tx_buffer *buffer_info;
2769	unsigned int i;
2770	u8 css;
2771	u32 cmd_len = E1000_TXD_CMD_DEXT;
2772
2773	if (skb->ip_summed != CHECKSUM_PARTIAL)
2774		return false;
2775
2776	switch (protocol) {
2777	case cpu_to_be16(ETH_P_IP):
2778		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2779			cmd_len |= E1000_TXD_CMD_TCP;
2780		break;
2781	case cpu_to_be16(ETH_P_IPV6):
2782		/* XXX not handling all IPV6 headers */
2783		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2784			cmd_len |= E1000_TXD_CMD_TCP;
2785		break;
2786	default:
2787		if (unlikely(net_ratelimit()))
2788			e_warn(drv, "checksum_partial proto=%x!\n",
2789			       skb->protocol);
2790		break;
2791	}
2792
2793	css = skb_checksum_start_offset(skb);
2794
2795	i = tx_ring->next_to_use;
2796	buffer_info = &tx_ring->buffer_info[i];
2797	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2798
2799	context_desc->lower_setup.ip_config = 0;
2800	context_desc->upper_setup.tcp_fields.tucss = css;
2801	context_desc->upper_setup.tcp_fields.tucso =
2802		css + skb->csum_offset;
2803	context_desc->upper_setup.tcp_fields.tucse = 0;
2804	context_desc->tcp_seg_setup.data = 0;
2805	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2806
2807	buffer_info->time_stamp = jiffies;
2808	buffer_info->next_to_watch = i;
2809
2810	if (unlikely(++i == tx_ring->count))
2811		i = 0;
2812
2813	tx_ring->next_to_use = i;
2814
2815	return true;
2816}
2817
2818#define E1000_MAX_TXD_PWR	12
2819#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
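
/* i.e. a single data descriptor carries at most 1 << 12 = 4096 bytes */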
2820
2821static int e1000_tx_map(struct e1000_adapter *adapter,
2822			struct e1000_tx_ring *tx_ring,
2823			struct sk_buff *skb, unsigned int first,
2824			unsigned int max_per_txd, unsigned int nr_frags,
2825			unsigned int mss)
2826{
2827	struct e1000_hw *hw = &adapter->hw;
2828	struct pci_dev *pdev = adapter->pdev;
2829	struct e1000_tx_buffer *buffer_info;
2830	unsigned int len = skb_headlen(skb);
2831	unsigned int offset = 0, size, count = 0, i;
2832	unsigned int f, bytecount, segs;
2833
2834	i = tx_ring->next_to_use;
2835
2836	while (len) {
2837		buffer_info = &tx_ring->buffer_info[i];
2838		size = min(len, max_per_txd);
2839		/* Workaround for Controller erratum --
2840		 * descriptor for non-tso packet in a linear SKB that follows a
2841		 * tso gets written back prematurely before the data is fully
2842		 * DMA'd to the controller
2843		 */
2844		if (!skb->data_len && tx_ring->last_tx_tso &&
2845		    !skb_is_gso(skb)) {
2846			tx_ring->last_tx_tso = false;
2847			size -= 4;
2848		}
2849
2850		/* Workaround for premature desc write-backs
2851		 * in TSO mode.  Append 4-byte sentinel desc
2852		 */
2853		if (unlikely(mss && !nr_frags && size == len && size > 8))
2854			size -= 4;
2855		/* Workaround for errata 10, which applies to all
2856		 * controllers in PCI-X mode.
2857		 * The fix is to make sure that the first descriptor of a
2858		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2859		 */
2860		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861			     (size > 2015) && count == 0))
2862			size = 2015;
2863
2864		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2865		 * terminating buffers within evenly-aligned dwords.
2866		 */
2867		if (unlikely(adapter->pcix_82544 &&
2868		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869		   size > 4))
2870			size -= 4;
2871
2872		buffer_info->length = size;
2873		/* set time_stamp *before* dma to help avoid a possible race */
2874		buffer_info->time_stamp = jiffies;
2875		buffer_info->mapped_as_page = false;
2876		buffer_info->dma = dma_map_single(&pdev->dev,
2877						  skb->data + offset,
2878						  size, DMA_TO_DEVICE);
2879		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880			goto dma_error;
2881		buffer_info->next_to_watch = i;
2882
2883		len -= size;
2884		offset += size;
2885		count++;
2886		if (len) {
2887			i++;
2888			if (unlikely(i == tx_ring->count))
2889				i = 0;
2890		}
2891	}
2892
2893	for (f = 0; f < nr_frags; f++) {
2894		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895
2896		len = skb_frag_size(frag);
2897		offset = 0;
2898
2899		while (len) {
2900			unsigned long bufend;
2901			i++;
2902			if (unlikely(i == tx_ring->count))
2903				i = 0;
2904
2905			buffer_info = &tx_ring->buffer_info[i];
2906			size = min(len, max_per_txd);
2907			/* Workaround for premature desc write-backs
2908			 * in TSO mode.  Append 4-byte sentinel desc
2909			 */
2910			if (unlikely(mss && f == (nr_frags-1) &&
2911			    size == len && size > 8))
2912				size -= 4;
2913			/* Workaround for potential 82544 hang in PCI-X.
2914			 * Avoid terminating buffers within evenly-aligned
2915			 * dwords.
2916			 */
2917			bufend = (unsigned long)
2918				page_to_phys(skb_frag_page(frag));
2919			bufend += offset + size - 1;
2920			if (unlikely(adapter->pcix_82544 &&
2921				     !(bufend & 4) &&
2922				     size > 4))
2923				size -= 4;
2924
2925			buffer_info->length = size;
2926			buffer_info->time_stamp = jiffies;
2927			buffer_info->mapped_as_page = true;
2928			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929						offset, size, DMA_TO_DEVICE);
2930			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931				goto dma_error;
2932			buffer_info->next_to_watch = i;
2933
2934			len -= size;
2935			offset += size;
2936			count++;
2937		}
2938	}
2939
2940	segs = skb_shinfo(skb)->gso_segs ?: 1;
2941	/* multiply data chunks by size of headers */
2942	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2943
2944	tx_ring->buffer_info[i].skb = skb;
2945	tx_ring->buffer_info[i].segs = segs;
2946	tx_ring->buffer_info[i].bytecount = bytecount;
2947	tx_ring->buffer_info[first].next_to_watch = i;
2948
2949	return count;
2950
2951dma_error:
2952	dev_err(&pdev->dev, "TX DMA map failed\n");
2953	buffer_info->dma = 0;
2954	if (count)
2955		count--;
2956
2957	while (count--) {
2958		if (i == 0)
2959			i += tx_ring->count;
2960		i--;
2961		buffer_info = &tx_ring->buffer_info[i];
2962		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963	}
2964
2965	return 0;
2966}
2967
2968static void e1000_tx_queue(struct e1000_adapter *adapter,
2969			   struct e1000_tx_ring *tx_ring, int tx_flags,
2970			   int count)
2971{
2972	struct e1000_tx_desc *tx_desc = NULL;
2973	struct e1000_tx_buffer *buffer_info;
2974	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975	unsigned int i;
2976
2977	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979			     E1000_TXD_CMD_TSE;
2980		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981
2982		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984	}
2985
2986	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989	}
2990
2991	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992		txd_lower |= E1000_TXD_CMD_VLE;
2993		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994	}
2995
2996	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2997		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2998
2999	i = tx_ring->next_to_use;
3000
3001	while (count--) {
3002		buffer_info = &tx_ring->buffer_info[i];
3003		tx_desc = E1000_TX_DESC(*tx_ring, i);
3004		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005		tx_desc->lower.data =
3006			cpu_to_le32(txd_lower | buffer_info->length);
3007		tx_desc->upper.data = cpu_to_le32(txd_upper);
3008		if (unlikely(++i == tx_ring->count))
3009			i = 0;
3010	}
3011
3012	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013
3014	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3015	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3016		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3017
3018	/* Force memory writes to complete before letting h/w
3019	 * know there are new descriptors to fetch.  (Only
3020	 * applicable for weak-ordered memory model archs,
3021	 * such as IA-64).
3022	 */
3023	dma_wmb();
3024
3025	tx_ring->next_to_use = i;
3026}
3027
3028/* 82547 workaround to avoid controller hang in half-duplex environment.
3029 * The workaround is to avoid queuing a large packet that would span
3030 * the internal Tx FIFO ring boundary by notifying the stack to resend
3031 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3032 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3033 * to the beginning of the Tx FIFO.
3034 */
3035
3036#define E1000_FIFO_HDR			0x10
3037#define E1000_82547_PAD_LEN		0x3E0
3038
3039static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040				       struct sk_buff *skb)
3041{
3042	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044
3045	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046
3047	if (adapter->link_duplex != HALF_DUPLEX)
3048		goto no_fifo_stall_required;
3049
3050	if (atomic_read(&adapter->tx_fifo_stall))
3051		return 1;
3052
3053	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054		atomic_set(&adapter->tx_fifo_stall, 1);
3055		return 1;
3056	}
3057
3058no_fifo_stall_required:
3059	adapter->tx_fifo_head += skb_fifo_len;
3060	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062	return 0;
3063}
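
/* Worked example (illustrative): a 1514-byte frame rounds up to
 * skb_fifo_len = ALIGN(1514 + 16, 16) = 1536 bytes of FIFO usage; the
 * stall is flagged only in half duplex, when that rounded length is at
 * least the remaining FIFO space plus E1000_82547_PAD_LEN (0x3E0).
 */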
3064
3065static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066{
3067	struct e1000_adapter *adapter = netdev_priv(netdev);
3068	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069
3070	netif_stop_queue(netdev);
3071	/* Herbert's original patch had:
3072	 *  smp_mb__after_netif_stop_queue();
3073	 * but since that doesn't exist yet, just open code it.
3074	 */
3075	smp_mb();
3076
3077	/* We need to check again in case another CPU has just
3078	 * made room available.
3079	 */
3080	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081		return -EBUSY;
3082
3083	/* A reprieve! */
3084	netif_start_queue(netdev);
3085	++adapter->restart_queue;
3086	return 0;
3087}
3088
3089static int e1000_maybe_stop_tx(struct net_device *netdev,
3090			       struct e1000_tx_ring *tx_ring, int size)
3091{
3092	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3093		return 0;
3094	return __e1000_maybe_stop_tx(netdev, size);
3095}
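
/* The stop-queue/smp_mb()/recheck sequence above pairs with the Tx
 * cleanup path: the barrier ensures the queue-stopped state is visible
 * before re-reading the free-descriptor count, so a concurrent cleanup
 * cannot free descriptors unseen and leave the queue stopped forever.
 */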
3096
3097#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
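/* e.g. TXD_USE_COUNT(6000, 12) = (6000 + 4095) >> 12 = 2, so a 6000-byte
 * buffer needs two data descriptors of at most 4096 bytes each
 */
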
3098static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3099				    struct net_device *netdev)
3100{
3101	struct e1000_adapter *adapter = netdev_priv(netdev);
3102	struct e1000_hw *hw = &adapter->hw;
3103	struct e1000_tx_ring *tx_ring;
3104	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3105	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3106	unsigned int tx_flags = 0;
3107	unsigned int len = skb_headlen(skb);
3108	unsigned int nr_frags;
3109	unsigned int mss;
3110	int count = 0;
3111	int tso;
3112	unsigned int f;
3113	__be16 protocol = vlan_get_protocol(skb);
3114
3115	/* This goes back to the question of how to logically map a Tx queue
3116	 * to a flow.  Right now, performance is impacted slightly negatively
3117	 * if using multiple Tx queues.  If the stack breaks away from a
3118	 * single qdisc implementation, we can look at this again.
3119	 */
3120	tx_ring = adapter->tx_ring;
3121
3122	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3123	 * packets may get corrupted during padding by HW.
3124	 * To work around this issue, pad all small packets manually.
3125	 */
3126	if (eth_skb_pad(skb))
3127		return NETDEV_TX_OK;
3128
3129	mss = skb_shinfo(skb)->gso_size;
3130	/* The controller does a simple calculation to
3131	 * make sure there is enough room in the FIFO before
3132	 * initiating the DMA for each buffer.  The calc is:
3133	 * ceil(buffer len / mss) <= 4.  To make sure we don't
3134	 * overrun the FIFO, adjust the max buffer len if mss
3135	 * drops.
3136	 */
3137	if (mss) {
3138		u8 hdr_len;
3139		max_per_txd = min(mss << 2, max_per_txd);
3140		max_txd_pwr = fls(max_per_txd) - 1;
3141
3142		hdr_len = skb_tcp_all_headers(skb);
3143		if (skb->data_len && hdr_len == len) {
3144			switch (hw->mac_type) {
3145			case e1000_82544: {
3146				unsigned int pull_size;
3147
3148				/* Make sure we have room to chop off 4 bytes,
3149				 * and that the end alignment will work out to
3150				 * this hardware's requirements
3151				 * NOTE: this is a TSO-only workaround;
3152				 * if the end byte alignment is not correct,
3153				 * it moves us into the next dword
3154				 */
3155				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3156				    & 4)
3157					break;
3158				pull_size = min((unsigned int)4, skb->data_len);
3159				if (!__pskb_pull_tail(skb, pull_size)) {
3160					e_err(drv, "__pskb_pull_tail failed.\n");
3162					dev_kfree_skb_any(skb);
3163					return NETDEV_TX_OK;
3164				}
3165				len = skb_headlen(skb);
3166				break;
3167			}
3168			default:
3169				/* do nothing */
3170				break;
3171			}
3172		}
3173	}
3174
3175	/* reserve a descriptor for the offload context */
3176	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177		count++;
3178	count++;
3179
3180	/* Controller Erratum workaround */
3181	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182		count++;
3183
3184	count += TXD_USE_COUNT(len, max_txd_pwr);
3185
3186	if (adapter->pcix_82544)
3187		count++;
3188
3189	/* Workaround for errata 10, which applies to all controllers
3190	 * in PCI-X mode, so add one more descriptor to the count
3191	 */
3192	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193			(len > 2015)))
3194		count++;
3195
3196	nr_frags = skb_shinfo(skb)->nr_frags;
3197	for (f = 0; f < nr_frags; f++)
3198		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199				       max_txd_pwr);
3200	if (adapter->pcix_82544)
3201		count += nr_frags;
3202
3203	/* need: count + 2 desc gap to keep tail from touching
3204	 * head, otherwise try next time
3205	 */
3206	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3207		return NETDEV_TX_BUSY;
3208
3209	if (unlikely((hw->mac_type == e1000_82547) &&
3210		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3211		netif_stop_queue(netdev);
3212		if (!test_bit(__E1000_DOWN, &adapter->flags))
3213			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214		return NETDEV_TX_BUSY;
3215	}
3216
3217	if (skb_vlan_tag_present(skb)) {
3218		tx_flags |= E1000_TX_FLAGS_VLAN;
3219		tx_flags |= (skb_vlan_tag_get(skb) <<
3220			     E1000_TX_FLAGS_VLAN_SHIFT);
3221	}
3222
3223	first = tx_ring->next_to_use;
3224
3225	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3226	if (tso < 0) {
3227		dev_kfree_skb_any(skb);
3228		return NETDEV_TX_OK;
3229	}
3230
3231	if (likely(tso)) {
3232		if (likely(hw->mac_type != e1000_82544))
3233			tx_ring->last_tx_tso = true;
3234		tx_flags |= E1000_TX_FLAGS_TSO;
3235	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3236		tx_flags |= E1000_TX_FLAGS_CSUM;
3237
3238	if (protocol == htons(ETH_P_IP))
3239		tx_flags |= E1000_TX_FLAGS_IPV4;
3240
3241	if (unlikely(skb->no_fcs))
3242		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3243
3244	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3245			     nr_frags, mss);
3246
3247	if (count) {
3248		/* The number of descriptors needed is higher than in other Intel
3249		 * drivers due to a number of workarounds.  The breakdown is below:
3250		 * Data descriptors: MAX_SKB_FRAGS + 1
3251		 * Context Descriptor: 1
3252		 * Keep head from touching tail: 2
3253		 * Workarounds: 3
3254		 */
3255		int desc_needed = MAX_SKB_FRAGS + 7;
3256
3257		netdev_sent_queue(netdev, skb->len);
3258		skb_tx_timestamp(skb);
3259
3260		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3261
3262		/* 82544 potentially requires twice as many data descriptors
3263		 * in order to guarantee buffers don't end on evenly-aligned
3264		 * dwords
3265		 */
3266		if (adapter->pcix_82544)
3267			desc_needed += MAX_SKB_FRAGS + 1;
3268
3269		/* Make sure there is space in the ring for the next send. */
3270		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271
3272		if (!netdev_xmit_more() ||
3273		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275		}
3276	} else {
3277		dev_kfree_skb_any(skb);
3278		tx_ring->buffer_info[first].time_stamp = 0;
3279		tx_ring->next_to_use = first;
3280	}
3281
3282	return NETDEV_TX_OK;
3283}
3284
3285#define NUM_REGS 38 /* 1 based count */
3286static void e1000_regdump(struct e1000_adapter *adapter)
3287{
3288	struct e1000_hw *hw = &adapter->hw;
3289	u32 regs[NUM_REGS];
3290	u32 *regs_buff = regs;
3291	int i = 0;
3292
3293	static const char * const reg_name[] = {
3294		"CTRL",  "STATUS",
3295		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297		"TIDV", "TXDCTL", "TADV", "TARC0",
3298		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299		"TXDCTL1", "TARC1",
3300		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303	};
3304
3305	regs_buff[0]  = er32(CTRL);
3306	regs_buff[1]  = er32(STATUS);
3307
3308	regs_buff[2]  = er32(RCTL);
3309	regs_buff[3]  = er32(RDLEN);
3310	regs_buff[4]  = er32(RDH);
3311	regs_buff[5]  = er32(RDT);
3312	regs_buff[6]  = er32(RDTR);
3313
3314	regs_buff[7]  = er32(TCTL);
3315	regs_buff[8]  = er32(TDBAL);
3316	regs_buff[9]  = er32(TDBAH);
3317	regs_buff[10] = er32(TDLEN);
3318	regs_buff[11] = er32(TDH);
3319	regs_buff[12] = er32(TDT);
3320	regs_buff[13] = er32(TIDV);
3321	regs_buff[14] = er32(TXDCTL);
3322	regs_buff[15] = er32(TADV);
3323	regs_buff[16] = er32(TARC0);
3324
3325	regs_buff[17] = er32(TDBAL1);
3326	regs_buff[18] = er32(TDBAH1);
3327	regs_buff[19] = er32(TDLEN1);
3328	regs_buff[20] = er32(TDH1);
3329	regs_buff[21] = er32(TDT1);
3330	regs_buff[22] = er32(TXDCTL1);
3331	regs_buff[23] = er32(TARC1);
3332	regs_buff[24] = er32(CTRL_EXT);
3333	regs_buff[25] = er32(ERT);
3334	regs_buff[26] = er32(RDBAL0);
3335	regs_buff[27] = er32(RDBAH0);
3336	regs_buff[28] = er32(TDFH);
3337	regs_buff[29] = er32(TDFT);
3338	regs_buff[30] = er32(TDFHS);
3339	regs_buff[31] = er32(TDFTS);
3340	regs_buff[32] = er32(TDFPC);
3341	regs_buff[33] = er32(RDFH);
3342	regs_buff[34] = er32(RDFT);
3343	regs_buff[35] = er32(RDFHS);
3344	regs_buff[36] = er32(RDFTS);
3345	regs_buff[37] = er32(RDFPC);
3346
3347	pr_info("Register dump\n");
3348	for (i = 0; i < NUM_REGS; i++)
3349		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3350}
3351
3352/*
3353 * e1000_dump: Print registers, tx ring and rx ring
3354 */
3355static void e1000_dump(struct e1000_adapter *adapter)
3356{
3357	/* this code doesn't handle multiple rings */
3358	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3359	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3360	int i;
3361
3362	if (!netif_msg_hw(adapter))
3363		return;
3364
3365	/* Print Registers */
3366	e1000_regdump(adapter);
3367
3368	/* transmit dump */
3369	pr_info("TX Desc ring0 dump\n");
3370
3371	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372	 *
3373	 * Legacy Transmit Descriptor
3374	 *   +--------------------------------------------------------------+
3375	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3376	 *   +--------------------------------------------------------------+
3377	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3378	 *   +--------------------------------------------------------------+
3379	 *   63       48 47        36 35    32 31     24 23    16 15        0
3380	 *
3381	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382	 *   63      48 47    40 39       32 31             16 15    8 7      0
3383	 *   +----------------------------------------------------------------+
3384	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3385	 *   +----------------------------------------------------------------+
3386	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3387	 *   +----------------------------------------------------------------+
3388	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3389	 *
3390	 * Extended Data Descriptor (DTYP=0x1)
3391	 *   +----------------------------------------------------------------+
3392	 * 0 |                     Buffer Address [63:0]                      |
3393	 *   +----------------------------------------------------------------+
3394	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3395	 *   +----------------------------------------------------------------+
3396	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3397	 */
3398	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3400
3401	if (!netif_msg_tx_done(adapter))
3402		goto rx_ring_summary;
3403
3404	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3407		struct my_u { __le64 a; __le64 b; };
3408		struct my_u *u = (struct my_u *)tx_desc;
3409		const char *type;
3410
3411		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412			type = "NTC/U";
3413		else if (i == tx_ring->next_to_use)
3414			type = "NTU";
3415		else if (i == tx_ring->next_to_clean)
3416			type = "NTC";
3417		else
3418			type = "";
3419
3420		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3421			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422			le64_to_cpu(u->a), le64_to_cpu(u->b),
3423			(u64)buffer_info->dma, buffer_info->length,
3424			buffer_info->next_to_watch,
3425			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3426	}
3427
3428rx_ring_summary:
3429	/* receive dump */
3430	pr_info("\nRX Desc ring dump\n");
3431
3432	/* Legacy Receive Descriptor Format
3433	 *
3434	 * +-----------------------------------------------------+
3435	 * |                Buffer Address [63:0]                |
3436	 * +-----------------------------------------------------+
3437	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3438	 * +-----------------------------------------------------+
3439	 * 63       48 47    40 39      32 31         16 15      0
3440	 */
3441	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3442
3443	if (!netif_msg_rx_status(adapter))
3444		goto exit;
3445
3446	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3447		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3448		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3449		struct my_u { __le64 a; __le64 b; };
3450		struct my_u *u = (struct my_u *)rx_desc;
3451		const char *type;
3452
3453		if (i == rx_ring->next_to_use)
3454			type = "NTU";
3455		else if (i == rx_ring->next_to_clean)
3456			type = "NTC";
3457		else
3458			type = "";
3459
3460		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3461			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3462			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3463	} /* for */
3464
3465	/* dump the descriptor caches */
3466	/* rx */
3467	pr_info("Rx descriptor cache in 64bit format\n");
3468	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3469		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470			i,
3471			readl(adapter->hw.hw_addr + i+4),
3472			readl(adapter->hw.hw_addr + i),
3473			readl(adapter->hw.hw_addr + i+12),
3474			readl(adapter->hw.hw_addr + i+8));
3475	}
3476	/* tx */
3477	pr_info("Tx descriptor cache in 64bit format\n");
3478	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3479		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480			i,
3481			readl(adapter->hw.hw_addr + i+4),
3482			readl(adapter->hw.hw_addr + i),
3483			readl(adapter->hw.hw_addr + i+12),
3484			readl(adapter->hw.hw_addr + i+8));
3485	}
3486exit:
3487	return;
3488}
3489
3490/**
3491 * e1000_tx_timeout - Respond to a Tx Hang
3492 * @netdev: network interface device structure
3493 * @txqueue: number of the Tx queue that hung (unused)
3494 **/
3495static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496{
3497	struct e1000_adapter *adapter = netdev_priv(netdev);
3498
3499	/* Do the reset outside of interrupt context */
3500	adapter->tx_timeout_count++;
3501	schedule_work(&adapter->reset_task);
3502}
3503
3504static void e1000_reset_task(struct work_struct *work)
3505{
3506	struct e1000_adapter *adapter =
3507		container_of(work, struct e1000_adapter, reset_task);
3508
3509	e_err(drv, "Reset adapter\n");
3510	e1000_reinit_locked(adapter);
3511}
3512
3513/**
3514 * e1000_change_mtu - Change the Maximum Transfer Unit
3515 * @netdev: network interface device structure
3516 * @new_mtu: new value for maximum frame size
3517 *
3518 * Returns 0 on success, negative on failure
3519 **/
3520static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3521{
3522	struct e1000_adapter *adapter = netdev_priv(netdev);
3523	struct e1000_hw *hw = &adapter->hw;
3524	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3525
3526	/* Adapter-specific max frame size limits. */
3527	switch (hw->mac_type) {
3528	case e1000_undefined ... e1000_82542_rev2_1:
3529		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3530			e_err(probe, "Jumbo Frames not supported.\n");
3531			return -EINVAL;
3532		}
3533		break;
3534	default:
3535		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3536		break;
3537	}
3538
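	/* Serialize MTU changes with any in-flight reset: spin until we
	 * own the __E1000_RESETTING bit.
	 */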
3539	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540		msleep(1);
3541	/* e1000_down has a dependency on max_frame_size */
3542	hw->max_frame_size = max_frame;
3543	if (netif_running(netdev)) {
3544		/* prevent buffers from being reallocated */
3545		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3546		e1000_down(adapter);
3547	}
3548
3549	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3550	 * means we reserve 2 more; this pushes us to allocate from the next
3551	 * larger slab size, i.e. RXBUFFER_2048 --> size-4096 slab.
3552	 * However, with the *_jumbo_rx* routines, jumbo receives will use
3553	 * fragmented skbs instead.
3554	 */
3556
3557	if (max_frame <= E1000_RXBUFFER_2048)
3558		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3559	else
3560#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3561		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3562#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3563		adapter->rx_buffer_len = PAGE_SIZE;
3564#endif
3565
3566	/* adjust allocation if LPE protects us, and we aren't using SBP */
3567	if (!hw->tbi_compatibility_on &&
3568	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3569	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3570		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3571
3572	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573		   netdev->mtu, new_mtu);
3574	netdev->mtu = new_mtu;
3575
3576	if (netif_running(netdev))
3577		e1000_up(adapter);
3578	else
3579		e1000_reset(adapter);
3580
3581	clear_bit(__E1000_RESETTING, &adapter->flags);
3582
3583	return 0;
3584}
3585
3586/**
3587 * e1000_update_stats - Update the board statistics counters
3588 * @adapter: board private structure
3589 **/
3590void e1000_update_stats(struct e1000_adapter *adapter)
3591{
3592	struct net_device *netdev = adapter->netdev;
3593	struct e1000_hw *hw = &adapter->hw;
3594	struct pci_dev *pdev = adapter->pdev;
3595	unsigned long flags;
3596	u16 phy_tmp;
3597
3598#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3599
3600	/* Prevent stats update while adapter is being reset, or if the pci
3601	 * connection is down.
3602	 */
3603	if (adapter->link_speed == 0)
3604		return;
3605	if (pci_channel_offline(pdev))
3606		return;
3607
3608	spin_lock_irqsave(&adapter->stats_lock, flags);
3609
3610	/* these counters are modified from e1000_tbi_adjust_stats,
3611	 * called from the interrupt context, so they must only
3612	 * be written while holding adapter->stats_lock
3613	 */
3614
3615	adapter->stats.crcerrs += er32(CRCERRS);
3616	adapter->stats.gprc += er32(GPRC);
3617	adapter->stats.gorcl += er32(GORCL);
3618	adapter->stats.gorch += er32(GORCH);
3619	adapter->stats.bprc += er32(BPRC);
3620	adapter->stats.mprc += er32(MPRC);
3621	adapter->stats.roc += er32(ROC);
3622
3623	adapter->stats.prc64 += er32(PRC64);
3624	adapter->stats.prc127 += er32(PRC127);
3625	adapter->stats.prc255 += er32(PRC255);
3626	adapter->stats.prc511 += er32(PRC511);
3627	adapter->stats.prc1023 += er32(PRC1023);
3628	adapter->stats.prc1522 += er32(PRC1522);
3629
3630	adapter->stats.symerrs += er32(SYMERRS);
3631	adapter->stats.mpc += er32(MPC);
3632	adapter->stats.scc += er32(SCC);
3633	adapter->stats.ecol += er32(ECOL);
3634	adapter->stats.mcc += er32(MCC);
3635	adapter->stats.latecol += er32(LATECOL);
3636	adapter->stats.dc += er32(DC);
3637	adapter->stats.sec += er32(SEC);
3638	adapter->stats.rlec += er32(RLEC);
3639	adapter->stats.xonrxc += er32(XONRXC);
3640	adapter->stats.xontxc += er32(XONTXC);
3641	adapter->stats.xoffrxc += er32(XOFFRXC);
3642	adapter->stats.xofftxc += er32(XOFFTXC);
3643	adapter->stats.fcruc += er32(FCRUC);
3644	adapter->stats.gptc += er32(GPTC);
3645	adapter->stats.gotcl += er32(GOTCL);
3646	adapter->stats.gotch += er32(GOTCH);
3647	adapter->stats.rnbc += er32(RNBC);
3648	adapter->stats.ruc += er32(RUC);
3649	adapter->stats.rfc += er32(RFC);
3650	adapter->stats.rjc += er32(RJC);
3651	adapter->stats.torl += er32(TORL);
3652	adapter->stats.torh += er32(TORH);
3653	adapter->stats.totl += er32(TOTL);
3654	adapter->stats.toth += er32(TOTH);
3655	adapter->stats.tpr += er32(TPR);
3656
3657	adapter->stats.ptc64 += er32(PTC64);
3658	adapter->stats.ptc127 += er32(PTC127);
3659	adapter->stats.ptc255 += er32(PTC255);
3660	adapter->stats.ptc511 += er32(PTC511);
3661	adapter->stats.ptc1023 += er32(PTC1023);
3662	adapter->stats.ptc1522 += er32(PTC1522);
3663
3664	adapter->stats.mptc += er32(MPTC);
3665	adapter->stats.bptc += er32(BPTC);
3666
3667	/* the TPT and COLC deltas below also feed the adaptive inter-frame
3668	 * spacing (IFS) logic
3669	 */
3668
3669	hw->tx_packet_delta = er32(TPT);
3670	adapter->stats.tpt += hw->tx_packet_delta;
3671	hw->collision_delta = er32(COLC);
3672	adapter->stats.colc += hw->collision_delta;
3673
3674	if (hw->mac_type >= e1000_82543) {
3675		adapter->stats.algnerrc += er32(ALGNERRC);
3676		adapter->stats.rxerrc += er32(RXERRC);
3677		adapter->stats.tncrs += er32(TNCRS);
3678		adapter->stats.cexterr += er32(CEXTERR);
3679		adapter->stats.tsctc += er32(TSCTC);
3680		adapter->stats.tsctfc += er32(TSCTFC);
3681	}
3682
3683	/* Fill out the OS statistics structure */
3684	netdev->stats.multicast = adapter->stats.mprc;
3685	netdev->stats.collisions = adapter->stats.colc;
3686
3687	/* Rx Errors */
3688
3689	/* RLEC on some newer hardware can be incorrect so build
3690	 * our own version based on RUC and ROC
3691	 */
3692	netdev->stats.rx_errors = adapter->stats.rxerrc +
3693		adapter->stats.crcerrs + adapter->stats.algnerrc +
3694		adapter->stats.ruc + adapter->stats.roc +
3695		adapter->stats.cexterr;
3696	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3697	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3698	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3699	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3700	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3701
3702	/* Tx Errors */
3703	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3704	netdev->stats.tx_errors = adapter->stats.txerrc;
3705	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3706	netdev->stats.tx_window_errors = adapter->stats.latecol;
3707	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3708	if (hw->bad_tx_carr_stats_fd &&
3709	    adapter->link_duplex == FULL_DUPLEX) {
3710		netdev->stats.tx_carrier_errors = 0;
3711		adapter->stats.tncrs = 0;
3712	}
3713
3714	/* Tx Dropped needs to be maintained elsewhere */
3715
3716	/* Phy Stats */
3717	if (hw->media_type == e1000_media_type_copper) {
3718		if ((adapter->link_speed == SPEED_1000) &&
3719		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3721			adapter->phy_stats.idle_errors += phy_tmp;
3722		}
3723
3724		if ((hw->mac_type <= e1000_82546) &&
3725		   (hw->phy_type == e1000_phy_m88) &&
3726		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727			adapter->phy_stats.receive_errors += phy_tmp;
3728	}
3729
3730	/* Management Stats */
3731	if (hw->has_smbus) {
3732		adapter->stats.mgptc += er32(MGTPTC);
3733		adapter->stats.mgprc += er32(MGTPRC);
3734		adapter->stats.mgpdc += er32(MGTPDC);
3735	}
3736
3737	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738}
3739
3740/**
3741 * e1000_intr - Interrupt Handler
3742 * @irq: interrupt number
3743 * @data: pointer to a network interface device structure
3744 **/
3745static irqreturn_t e1000_intr(int irq, void *data)
3746{
3747	struct net_device *netdev = data;
3748	struct e1000_adapter *adapter = netdev_priv(netdev);
3749	struct e1000_hw *hw = &adapter->hw;
3750	u32 icr = er32(ICR);
3751
3752	if (unlikely(!icr))
3753		return IRQ_NONE;  /* Not our interrupt */
3754
3755	/* we might have caused the interrupt, but the above
3756	 * read cleared it, and just in case the driver is
3757	 * down there is nothing to do so return handled
3758	 */
3759	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760		return IRQ_HANDLED;
3761
3762	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763		hw->get_link_status = 1;
3764		/* guard against interrupt when we're going down */
3765		if (!test_bit(__E1000_DOWN, &adapter->flags))
3766			schedule_delayed_work(&adapter->watchdog_task, 1);
3767	}
3768
3769	/* disable interrupts, without the synchronize_irq bit */
3770	ew32(IMC, ~0);
3771	E1000_WRITE_FLUSH();
3772
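	/* Hand the rest of the work to NAPI; the per-poll byte/packet
	 * counters reset here feed the adaptive interrupt-throttle (ITR)
	 * update performed in e1000_clean().
	 */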
3773	if (likely(napi_schedule_prep(&adapter->napi))) {
3774		adapter->total_tx_bytes = 0;
3775		adapter->total_tx_packets = 0;
3776		adapter->total_rx_bytes = 0;
3777		adapter->total_rx_packets = 0;
3778		__napi_schedule(&adapter->napi);
3779	} else {
3780		/* this really should not happen! if it does it is basically a
3781		 * bug, but not a hard error, so enable ints and continue
3782		 */
3783		if (!test_bit(__E1000_DOWN, &adapter->flags))
3784			e1000_irq_enable(adapter);
3785	}
3786
3787	return IRQ_HANDLED;
3788}
3789
3790/**
3791 * e1000_clean - NAPI Rx polling callback
3792 * @napi: napi struct containing references to driver info
3793 * @budget: budget given to driver for receive packets
3794 **/
3795static int e1000_clean(struct napi_struct *napi, int budget)
3796{
3797	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3798						     napi);
3799	int tx_clean_complete = 0, work_done = 0;
3800
3801	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802
3803	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804
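	/* Tx ring not fully cleaned, or the Rx budget was exhausted:
	 * report the full budget so NAPI keeps polling us.
	 */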
3805	if (!tx_clean_complete || work_done == budget)
3806		return budget;
3807
3808	/* Exit the polling mode, but don't re-enable interrupts if stack might
3809	 * poll us due to busy-polling
3810	 */
3811	if (likely(napi_complete_done(napi, work_done))) {
3812		if (likely(adapter->itr_setting & 3))
3813			e1000_set_itr(adapter);
3814		if (!test_bit(__E1000_DOWN, &adapter->flags))
3815			e1000_irq_enable(adapter);
3816	}
3817
3818	return work_done;
3819}
3820
3821/**
3822 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3823 * @adapter: board private structure
3824 * @tx_ring: ring to clean
3825 **/
3826static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3827			       struct e1000_tx_ring *tx_ring)
3828{
3829	struct e1000_hw *hw = &adapter->hw;
3830	struct net_device *netdev = adapter->netdev;
3831	struct e1000_tx_desc *tx_desc, *eop_desc;
3832	struct e1000_tx_buffer *buffer_info;
3833	unsigned int i, eop;
3834	unsigned int count = 0;
3835	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3836	unsigned int bytes_compl = 0, pkts_compl = 0;
3837
3838	i = tx_ring->next_to_clean;
3839	eop = tx_ring->buffer_info[i].next_to_watch;
3840	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3841
3842	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3843	       (count < tx_ring->count)) {
3844		bool cleaned = false;
3845		dma_rmb();	/* read buffer_info after eop_desc */
3846		for ( ; !cleaned; count++) {
3847			tx_desc = E1000_TX_DESC(*tx_ring, i);
3848			buffer_info = &tx_ring->buffer_info[i];
3849			cleaned = (i == eop);
3850
3851			if (cleaned) {
3852				total_tx_packets += buffer_info->segs;
3853				total_tx_bytes += buffer_info->bytecount;
3854				if (buffer_info->skb) {
3855					bytes_compl += buffer_info->skb->len;
3856					pkts_compl++;
3857				}
3858
3859			}
3860			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3861							 64);
3862			tx_desc->upper.data = 0;
3863
3864			if (unlikely(++i == tx_ring->count))
3865				i = 0;
3866		}
3867
3868		eop = tx_ring->buffer_info[i].next_to_watch;
3869		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870	}
3871
3872	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3873	 * which will reuse the cleaned buffers.
3874	 */
3875	smp_store_release(&tx_ring->next_to_clean, i);
3876
3877	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3878
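/* minimum free descriptors before the stopped queue is woken, so the
 * stack is not restarted for just a slot or two of new space
 */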
3879#define TX_WAKE_THRESHOLD 32
3880	if (unlikely(count && netif_carrier_ok(netdev) &&
3881		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3882		/* Make sure that anybody stopping the queue after this
3883		 * sees the new next_to_clean.
3884		 */
3885		smp_mb();
3886
3887		if (netif_queue_stopped(netdev) &&
3888		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3889			netif_wake_queue(netdev);
3890			++adapter->restart_queue;
3891		}
3892	}
3893
3894	if (adapter->detect_tx_hung) {
3895		/* Detect a transmit hang in hardware, this serializes the
3896		 * check with the clearing of time_stamp and movement of i
3897		 */
3898		adapter->detect_tx_hung = false;
3899		if (tx_ring->buffer_info[eop].time_stamp &&
3900		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3901			       (adapter->tx_timeout_factor * HZ)) &&
3902		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3903
3904			/* detected Tx unit hang */
3905			e_err(drv, "Detected Tx Unit Hang\n"
3906			      "  Tx Queue             <%lu>\n"
3907			      "  TDH                  <%x>\n"
3908			      "  TDT                  <%x>\n"
3909			      "  next_to_use          <%x>\n"
3910			      "  next_to_clean        <%x>\n"
3911			      "buffer_info[next_to_clean]\n"
3912			      "  time_stamp           <%lx>\n"
3913			      "  next_to_watch        <%x>\n"
3914			      "  jiffies              <%lx>\n"
3915			      "  next_to_watch.status <%x>\n",
3916				(unsigned long)(tx_ring - adapter->tx_ring),
3917				readl(hw->hw_addr + tx_ring->tdh),
3918				readl(hw->hw_addr + tx_ring->tdt),
3919				tx_ring->next_to_use,
3920				tx_ring->next_to_clean,
3921				tx_ring->buffer_info[eop].time_stamp,
3922				eop,
3923				jiffies,
3924				eop_desc->upper.fields.status);
3925			e1000_dump(adapter);
3926			netif_stop_queue(netdev);
3927		}
3928	}
3929	adapter->total_tx_bytes += total_tx_bytes;
3930	adapter->total_tx_packets += total_tx_packets;
3931	netdev->stats.tx_bytes += total_tx_bytes;
3932	netdev->stats.tx_packets += total_tx_packets;
3933	return count < tx_ring->count;
3934}
3935
3936/**
3937 * e1000_rx_checksum - Receive Checksum Offload for 82543
3938 * @adapter:     board private structure
3939 * @status_err:  receive descriptor status and error fields
3940 * @csum:        receive descriptor csum field
3941 * @skb:         socket buffer with received data
3942 **/
3943static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3944			      u32 csum, struct sk_buff *skb)
3945{
3946	struct e1000_hw *hw = &adapter->hw;
3947	u16 status = (u16)status_err;
3948	u8 errors = (u8)(status_err >> 24);
3949
3950	skb_checksum_none_assert(skb);
3951
3952	/* 82543 or newer only */
3953	if (unlikely(hw->mac_type < e1000_82543))
3954		return;
3955	/* Ignore Checksum bit is set */
3956	if (unlikely(status & E1000_RXD_STAT_IXSM))
3957		return;
3958	/* TCP/UDP checksum error bit is set */
3959	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3960		/* let the stack verify checksum errors */
3961		adapter->hw_csum_err++;
3962		return;
3963	}
3964	/* TCP/UDP Checksum has not been calculated */
3965	if (!(status & E1000_RXD_STAT_TCPCS))
3966		return;
3967
3968	/* It must be a TCP or UDP packet with a valid checksum */
3969	skb->ip_summed = CHECKSUM_UNNECESSARY;
3970	adapter->hw_csum_good++;
3974}
3975
3976/**
3977 * e1000_consume_page - helper function for jumbo Rx path
3978 * @bi: software descriptor shadow data
3979 * @skb: skb being modified
3980 * @length: length of data being added
3981 **/
3982static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3983			       u16 length)
3984{
3985	bi->rxbuf.page = NULL;
3986	skb->len += length;
3987	skb->data_len += length;
3988	skb->truesize += PAGE_SIZE;
3989}
3990
3991/**
3992 * e1000_receive_skb - helper function to handle rx indications
3993 * @adapter: board private structure
3994 * @status: descriptor status field as written by hardware
3995 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3996 * @skb: pointer to sk_buff to be indicated to stack
3997 */
3998static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3999			      __le16 vlan, struct sk_buff *skb)
4000{
4001	skb->protocol = eth_type_trans(skb, adapter->netdev);
4002
4003	if (status & E1000_RXD_STAT_VP) {
4004		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4005
4006		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007	}
4008	napi_gro_receive(&adapter->napi, skb);
4009}
4010
4011/**
4012 * e1000_tbi_adjust_stats
4013 * @hw: Struct containing variables accessed by shared code
4014 * @stats: pointer to stats struct
4015 * @frame_len: The length of the frame in question
4016 * @mac_addr: The Ethernet destination address of the frame in question
4017 *
4018 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4019 */
4020static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4021				   struct e1000_hw_stats *stats,
4022				   u32 frame_len, const u8 *mac_addr)
4023{
4024	u64 carry_bit;
4025
4026	/* First adjust the frame length. */
4027	frame_len--;
4028	/* We need to adjust the statistics counters, since the hardware
4029	 * counters overcount this packet as a CRC error and undercount
4030	 * the packet as a good packet
4031	 */
4032	/* This packet should not be counted as a CRC error. */
4033	stats->crcerrs--;
4034	/* This packet does count as a Good Packet Received. */
4035	stats->gprc++;
4036
4037	/* Adjust the Good Octets received counters */
4038	carry_bit = 0x80000000 & stats->gorcl;
4039	stats->gorcl += frame_len;
4040	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4041	 * Received Count) was one before the addition,
4042	 * AND it is zero after, then we lost the carry out,
4043	 * need to add one to Gorch (Good Octets Received Count High).
4044	 * This could be simplified if all environments supported
4045	 * 64-bit integers.
4046	 */
4047	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4048		stats->gorch++;
4049	/* Is this a broadcast or multicast?  Check broadcast first,
4050	 * since the test for a multicast frame will test positive on
4051	 * a broadcast frame.
4052	 */
4053	if (is_broadcast_ether_addr(mac_addr))
4054		stats->bprc++;
4055	else if (is_multicast_ether_addr(mac_addr))
4056		stats->mprc++;
4057
4058	if (frame_len == hw->max_frame_size) {
4059		/* In this case, the hardware has overcounted the number of
4060		 * oversize frames.
4061		 */
4062		if (stats->roc > 0)
4063			stats->roc--;
4064	}
4065
4066	/* Adjust the bin counters when the extra byte put the frame in the
4067	 * wrong bin. Remember that the frame_len was adjusted above.
4068	 */
4069	if (frame_len == 64) {
4070		stats->prc64++;
4071		stats->prc127--;
4072	} else if (frame_len == 127) {
4073		stats->prc127++;
4074		stats->prc255--;
4075	} else if (frame_len == 255) {
4076		stats->prc255++;
4077		stats->prc511--;
4078	} else if (frame_len == 511) {
4079		stats->prc511++;
4080		stats->prc1023--;
4081	} else if (frame_len == 1023) {
4082		stats->prc1023++;
4083		stats->prc1522--;
4084	} else if (frame_len == 1522) {
4085		stats->prc1522++;
4086	}
4087}
4088
4089static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4090				    u8 status, u8 errors,
4091				    u32 length, const u8 *data)
4092{
4093	struct e1000_hw *hw = &adapter->hw;
4094	u8 last_byte = *(data + length - 1);
4095
4096	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4097		unsigned long irq_flags;
4098
4099		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4100		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4101		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4102
4103		return true;
4104	}
4105
4106	return false;
4107}
4108
4109static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4110					  unsigned int bufsz)
4111{
4112	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4113
4114	if (unlikely(!skb))
4115		adapter->alloc_rx_buff_failed++;
4116	return skb;
4117}
4118
4119/**
4120 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4121 * @adapter: board private structure
4122 * @rx_ring: ring to clean
4123 * @work_done: amount of napi work completed this call
4124 * @work_to_do: max amount of work allowed for this call to do
4125 *
4126 * the return value indicates whether actual cleaning was done; there
4127 * is no guarantee that everything was cleaned
4128 */
4129static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4130				     struct e1000_rx_ring *rx_ring,
4131				     int *work_done, int work_to_do)
4132{
4133	struct net_device *netdev = adapter->netdev;
4134	struct pci_dev *pdev = adapter->pdev;
4135	struct e1000_rx_desc *rx_desc, *next_rxd;
4136	struct e1000_rx_buffer *buffer_info, *next_buffer;
4137	u32 length;
4138	unsigned int i;
4139	int cleaned_count = 0;
4140	bool cleaned = false;
4141	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4142
4143	i = rx_ring->next_to_clean;
4144	rx_desc = E1000_RX_DESC(*rx_ring, i);
4145	buffer_info = &rx_ring->buffer_info[i];
4146
4147	while (rx_desc->status & E1000_RXD_STAT_DD) {
4148		struct sk_buff *skb;
4149		u8 status;
4150
4151		if (*work_done >= work_to_do)
4152			break;
4153		(*work_done)++;
4154		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4155
4156		status = rx_desc->status;
4157
4158		if (++i == rx_ring->count)
4159			i = 0;
4160
4161		next_rxd = E1000_RX_DESC(*rx_ring, i);
4162		prefetch(next_rxd);
4163
4164		next_buffer = &rx_ring->buffer_info[i];
4165
4166		cleaned = true;
4167		cleaned_count++;
4168		dma_unmap_page(&pdev->dev, buffer_info->dma,
4169			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4170		buffer_info->dma = 0;
4171
4172		length = le16_to_cpu(rx_desc->length);
4173
4174		/* errors is only valid for DD + EOP descriptors */
4175		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4176		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4177			u8 *mapped = page_address(buffer_info->rxbuf.page);
4178
4179			if (e1000_tbi_should_accept(adapter, status,
4180						    rx_desc->errors,
4181						    length, mapped)) {
4182				length--;
4183			} else if (netdev->features & NETIF_F_RXALL) {
4184				goto process_skb;
4185			} else {
4186				/* an error means any chain goes out the window
4187				 * too
4188				 */
4189				dev_kfree_skb(rx_ring->rx_skb_top);
4190				rx_ring->rx_skb_top = NULL;
4191				goto next_desc;
4192			}
4193		}
4194
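/* rx_skb_top holds the partially assembled skb for a jumbo frame that
 * spans multiple descriptors; the define below is shorthand for it
 */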
4195#define rxtop rx_ring->rx_skb_top
4196process_skb:
4197		if (!(status & E1000_RXD_STAT_EOP)) {
4198			/* this descriptor is only the beginning (or middle) */
4199			if (!rxtop) {
4200				/* this is the beginning of a chain */
4201				rxtop = napi_get_frags(&adapter->napi);
4202				if (!rxtop)
4203					break;
4204
4205				skb_fill_page_desc(rxtop, 0,
4206						   buffer_info->rxbuf.page,
4207						   0, length);
4208			} else {
4209				/* this is the middle of a chain */
4210				skb_fill_page_desc(rxtop,
4211				    skb_shinfo(rxtop)->nr_frags,
4212				    buffer_info->rxbuf.page, 0, length);
4213			}
4214			e1000_consume_page(buffer_info, rxtop, length);
4215			goto next_desc;
4216		} else {
4217			if (rxtop) {
4218				/* end of the chain */
4219				skb_fill_page_desc(rxtop,
4220				    skb_shinfo(rxtop)->nr_frags,
4221				    buffer_info->rxbuf.page, 0, length);
4222				skb = rxtop;
4223				rxtop = NULL;
4224				e1000_consume_page(buffer_info, skb, length);
4225			} else {
4226				struct page *p;
4227				/* no chain, got EOP, this buf is the packet
4228				 * copybreak to save the put_page/alloc_page
4229				 */
4230				p = buffer_info->rxbuf.page;
4231					if (length <= copybreak) {
4232					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4233						length -= 4;
4234					skb = e1000_alloc_rx_skb(adapter,
4235								 length);
4236					if (!skb)
4237						break;
4238
4239					memcpy(skb_tail_pointer(skb),
4240					       page_address(p), length);
4241
4242					/* re-use the page, so don't erase
4243					 * buffer_info->rxbuf.page
4244					 */
4245					skb_put(skb, length);
4246					e1000_rx_checksum(adapter,
4247							  status | rx_desc->errors << 24,
4248							  le16_to_cpu(rx_desc->csum), skb);
4249
4250					total_rx_bytes += skb->len;
4251					total_rx_packets++;
4252
4253					e1000_receive_skb(adapter, status,
4254							  rx_desc->special, skb);
4255					goto next_desc;
4256				} else {
4257					skb = napi_get_frags(&adapter->napi);
4258					if (!skb) {
4259						adapter->alloc_rx_buff_failed++;
4260						break;
4261					}
4262					skb_fill_page_desc(skb, 0, p, 0,
4263							   length);
4264					e1000_consume_page(buffer_info, skb,
4265							   length);
4266				}
4267			}
4268		}
4269
4270		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4271		e1000_rx_checksum(adapter,
4272				  (u32)(status) |
4273				  ((u32)(rx_desc->errors) << 24),
4274				  le16_to_cpu(rx_desc->csum), skb);
4275
4276		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4277		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4278			pskb_trim(skb, skb->len - 4);
4279		total_rx_packets++;
4280
4281		if (status & E1000_RXD_STAT_VP) {
4282			__le16 vlan = rx_desc->special;
4283			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4284
4285			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4286		}
4287
4288		napi_gro_frags(&adapter->napi);
4289
4290next_desc:
4291		rx_desc->status = 0;
4292
4293		/* return some buffers to hardware, one at a time is too slow */
4294		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4295			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4296			cleaned_count = 0;
4297		}
4298
4299		/* use prefetched values */
4300		rx_desc = next_rxd;
4301		buffer_info = next_buffer;
4302	}
4303	rx_ring->next_to_clean = i;
4304
4305	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4306	if (cleaned_count)
4307		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4308
4309	adapter->total_rx_packets += total_rx_packets;
4310	adapter->total_rx_bytes += total_rx_bytes;
4311	netdev->stats.rx_bytes += total_rx_bytes;
4312	netdev->stats.rx_packets += total_rx_packets;
4313	return cleaned;
4314}
4315
4316/* this should improve performance for small packets with large amounts
4317 * of reassembly being done in the stack
4318 */
4319static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4320				       struct e1000_rx_buffer *buffer_info,
4321				       u32 length, const void *data)
4322{
4323	struct sk_buff *skb;
4324
4325	if (length > copybreak)
4326		return NULL;
4327
4328	skb = e1000_alloc_rx_skb(adapter, length);
4329	if (!skb)
4330		return NULL;
4331
4332	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4333				length, DMA_FROM_DEVICE);
4334
4335	skb_put_data(skb, data, length);
4336
4337	return skb;
4338}
4339
4340/**
4341 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4342 * @adapter: board private structure
4343 * @rx_ring: ring to clean
4344 * @work_done: amount of napi work completed this call
4345 * @work_to_do: max amount of work allowed for this call to do
4346 */
4347static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4348			       struct e1000_rx_ring *rx_ring,
4349			       int *work_done, int work_to_do)
4350{
4351	struct net_device *netdev = adapter->netdev;
4352	struct pci_dev *pdev = adapter->pdev;
4353	struct e1000_rx_desc *rx_desc, *next_rxd;
4354	struct e1000_rx_buffer *buffer_info, *next_buffer;
4355	u32 length;
4356	unsigned int i;
4357	int cleaned_count = 0;
4358	bool cleaned = false;
4359	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4360
4361	i = rx_ring->next_to_clean;
4362	rx_desc = E1000_RX_DESC(*rx_ring, i);
4363	buffer_info = &rx_ring->buffer_info[i];
4364
4365	while (rx_desc->status & E1000_RXD_STAT_DD) {
4366		struct sk_buff *skb;
4367		u8 *data;
4368		u8 status;
4369
4370		if (*work_done >= work_to_do)
4371			break;
4372		(*work_done)++;
4373		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4374
4375		status = rx_desc->status;
4376		length = le16_to_cpu(rx_desc->length);
4377
4378		data = buffer_info->rxbuf.data;
4379		prefetch(data);
4380		skb = e1000_copybreak(adapter, buffer_info, length, data);
4381		if (!skb) {
4382			unsigned int frag_len = e1000_frag_len(adapter);
4383
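			/* no copybreak copy was made: build the skb directly
			 * around the existing receive fragment instead of
			 * copying the data
			 */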
4384			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4385			if (!skb) {
4386				adapter->alloc_rx_buff_failed++;
4387				break;
4388			}
4389
4390			skb_reserve(skb, E1000_HEADROOM);
4391			dma_unmap_single(&pdev->dev, buffer_info->dma,
4392					 adapter->rx_buffer_len,
4393					 DMA_FROM_DEVICE);
4394			buffer_info->dma = 0;
4395			buffer_info->rxbuf.data = NULL;
4396		}
4397
4398		if (++i == rx_ring->count)
4399			i = 0;
4400
4401		next_rxd = E1000_RX_DESC(*rx_ring, i);
4402		prefetch(next_rxd);
4403
4404		next_buffer = &rx_ring->buffer_info[i];
4405
4406		cleaned = true;
4407		cleaned_count++;
4408
4409		/* !EOP means multiple descriptors were used to store a single
4410		 * packet; if that's the case we need to toss it.  In fact, we
4411		 * need to toss every packet with the EOP bit clear and the next
4412		 * frame that _does_ have the EOP bit set, as it is by
4413		 * definition only a frame fragment
4414		 */
4415		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4416			adapter->discarding = true;
4417
4418		if (adapter->discarding) {
4419			/* All receives must fit into a single buffer */
4420			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4421			dev_kfree_skb(skb);
4422			if (status & E1000_RXD_STAT_EOP)
4423				adapter->discarding = false;
4424			goto next_desc;
4425		}
4426
4427		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4428			if (e1000_tbi_should_accept(adapter, status,
4429						    rx_desc->errors,
4430						    length, data)) {
4431				length--;
4432			} else if (netdev->features & NETIF_F_RXALL) {
4433				goto process_skb;
4434			} else {
4435				dev_kfree_skb(skb);
4436				goto next_desc;
4437			}
4438		}
4439
4440process_skb:
4441		total_rx_bytes += (length - 4); /* don't count FCS */
4442		total_rx_packets++;
4443
4444		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4445			/* adjust length to remove Ethernet CRC, this must be
4446			 * done after the TBI_ACCEPT workaround above
4447			 */
4448			length -= 4;
4449
4450		if (buffer_info->rxbuf.data == NULL)
4451			skb_put(skb, length);
4452		else /* copybreak skb */
4453			skb_trim(skb, length);
4454
4455		/* Receive Checksum Offload */
4456		e1000_rx_checksum(adapter,
4457				  (u32)(status) |
4458				  ((u32)(rx_desc->errors) << 24),
4459				  le16_to_cpu(rx_desc->csum), skb);
4460
4461		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4462
4463next_desc:
4464		rx_desc->status = 0;
4465
4466		/* return some buffers to hardware, one at a time is too slow */
4467		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4468			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4469			cleaned_count = 0;
4470		}
4471
4472		/* use prefetched values */
4473		rx_desc = next_rxd;
4474		buffer_info = next_buffer;
4475	}
4476	rx_ring->next_to_clean = i;
4477
4478	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4479	if (cleaned_count)
4480		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4481
4482	adapter->total_rx_packets += total_rx_packets;
4483	adapter->total_rx_bytes += total_rx_bytes;
4484	netdev->stats.rx_bytes += total_rx_bytes;
4485	netdev->stats.rx_packets += total_rx_packets;
4486	return cleaned;
4487}
4488
4489/**
4490 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4491 * @adapter: address of board private structure
4492 * @rx_ring: pointer to receive ring structure
4493 * @cleaned_count: number of buffers to allocate this pass
4494 **/
4495static void
4496e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4497			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4498{
4499	struct pci_dev *pdev = adapter->pdev;
4500	struct e1000_rx_desc *rx_desc;
4501	struct e1000_rx_buffer *buffer_info;
4502	unsigned int i;
4503
4504	i = rx_ring->next_to_use;
4505	buffer_info = &rx_ring->buffer_info[i];
4506
4507	while (cleaned_count--) {
4508		/* allocate a new page if necessary */
4509		if (!buffer_info->rxbuf.page) {
4510			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4511			if (unlikely(!buffer_info->rxbuf.page)) {
4512				adapter->alloc_rx_buff_failed++;
4513				break;
4514			}
4515		}
4516
4517		if (!buffer_info->dma) {
4518			buffer_info->dma = dma_map_page(&pdev->dev,
4519							buffer_info->rxbuf.page, 0,
4520							adapter->rx_buffer_len,
4521							DMA_FROM_DEVICE);
4522			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4523				put_page(buffer_info->rxbuf.page);
4524				buffer_info->rxbuf.page = NULL;
4525				buffer_info->dma = 0;
4526				adapter->alloc_rx_buff_failed++;
4527				break;
4528			}
4529		}
4530
4531		rx_desc = E1000_RX_DESC(*rx_ring, i);
4532		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4533
4534		if (unlikely(++i == rx_ring->count))
4535			i = 0;
4536		buffer_info = &rx_ring->buffer_info[i];
4537	}
4538
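	/* publish the new tail: step i back one slot (wrapping) so RDT
	 * points at the last descriptor just filled
	 */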
4539	if (likely(rx_ring->next_to_use != i)) {
4540		rx_ring->next_to_use = i;
4541		if (unlikely(i-- == 0))
4542			i = (rx_ring->count - 1);
4543
4544		/* Force memory writes to complete before letting h/w
4545		 * know there are new descriptors to fetch.  (Only
4546		 * applicable for weak-ordered memory model archs,
4547		 * such as IA-64).
4548		 */
4549		dma_wmb();
4550		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4551	}
4552}
4553
4554/**
4555 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4556 * @adapter: address of board private structure
4557 * @rx_ring: pointer to ring struct
4558 * @cleaned_count: number of new Rx buffers to try to allocate
4559 **/
4560static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4561				   struct e1000_rx_ring *rx_ring,
4562				   int cleaned_count)
4563{
4564	struct e1000_hw *hw = &adapter->hw;
4565	struct pci_dev *pdev = adapter->pdev;
4566	struct e1000_rx_desc *rx_desc;
4567	struct e1000_rx_buffer *buffer_info;
4568	unsigned int i;
4569	unsigned int bufsz = adapter->rx_buffer_len;
4570
4571	i = rx_ring->next_to_use;
4572	buffer_info = &rx_ring->buffer_info[i];
4573
4574	while (cleaned_count--) {
4575		void *data;
4576
4577		if (buffer_info->rxbuf.data)
4578			goto skip;
4579
4580		data = e1000_alloc_frag(adapter);
4581		if (!data) {
4582			/* Better luck next round */
4583			adapter->alloc_rx_buff_failed++;
4584			break;
4585		}
4586
4587		/* Fix for errata 23, can't cross 64kB boundary */
4588		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4589			void *olddata = data;
4590			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
4591			      bufsz, data);
4592			/* Try again, without freeing the previous */
4593			data = e1000_alloc_frag(adapter);
4594			/* Failed allocation, critical failure */
4595			if (!data) {
4596				skb_free_frag(olddata);
4597				adapter->alloc_rx_buff_failed++;
4598				break;
4599			}
4600
4601			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4602				/* give up */
4603				skb_free_frag(data);
4604				skb_free_frag(olddata);
4605				adapter->alloc_rx_buff_failed++;
4606				break;
4607			}
4608
4609			/* Use new allocation */
4610			skb_free_frag(olddata);
4611		}
4612		buffer_info->dma = dma_map_single(&pdev->dev,
4613						  data,
4614						  adapter->rx_buffer_len,
4615						  DMA_FROM_DEVICE);
4616		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4617			skb_free_frag(data);
4618			buffer_info->dma = 0;
4619			adapter->alloc_rx_buff_failed++;
4620			break;
4621		}
4622
4623		/* XXX if it was allocated cleanly it will never map to a
4624		 * boundary crossing
4625		 */
4626
4627		/* Fix for errata 23, can't cross 64kB boundary */
4628		if (!e1000_check_64k_bound(adapter,
4629					(void *)(unsigned long)buffer_info->dma,
4630					adapter->rx_buffer_len)) {
4631			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
4632			      adapter->rx_buffer_len,
4633			      (void *)(unsigned long)buffer_info->dma);
4634
4635			dma_unmap_single(&pdev->dev, buffer_info->dma,
4636					 adapter->rx_buffer_len,
4637					 DMA_FROM_DEVICE);
4638
4639			skb_free_frag(data);
4640			buffer_info->rxbuf.data = NULL;
4641			buffer_info->dma = 0;
4642
4643			adapter->alloc_rx_buff_failed++;
4644			break;
4645		}
4646		buffer_info->rxbuf.data = data;
4647 skip:
4648		rx_desc = E1000_RX_DESC(*rx_ring, i);
4649		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4650
4651		if (unlikely(++i == rx_ring->count))
4652			i = 0;
4653		buffer_info = &rx_ring->buffer_info[i];
4654	}
4655
4656	if (likely(rx_ring->next_to_use != i)) {
4657		rx_ring->next_to_use = i;
4658		if (unlikely(i-- == 0))
4659			i = (rx_ring->count - 1);
4660
4661		/* Force memory writes to complete before letting h/w
4662		 * know there are new descriptors to fetch.  (Only
4663		 * applicable for weak-ordered memory model archs,
4664		 * such as IA-64).
4665		 */
4666		dma_wmb();
4667		writel(i, hw->hw_addr + rx_ring->rdt);
4668	}
4669}
4670
4671/**
4672 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4673 * @adapter: address of board private structure
4674 **/
4675static void e1000_smartspeed(struct e1000_adapter *adapter)
4676{
4677	struct e1000_hw *hw = &adapter->hw;
4678	u16 phy_status;
4679	u16 phy_ctrl;
4680
4681	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4682	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4683		return;
4684
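	/* Workaround sequence: on repeated master/slave faults, first turn
	 * off the manual master/slave setting and restart autoneg; if there
	 * is still no link after E1000_SMARTSPEED_DOWNSHIFT polls, turn it
	 * back on (the 2/3 pair cable case below).
	 */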
4685	if (adapter->smartspeed == 0) {
4686		/* If Master/Slave config fault is asserted twice,
4687		 * we assume back-to-back
4688		 */
4689		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4690		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4691			return;
4692		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4693		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4694			return;
4695		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4696		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4697			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4698			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4699					    phy_ctrl);
4700			adapter->smartspeed++;
4701			if (!e1000_phy_setup_autoneg(hw) &&
4702			   !e1000_read_phy_reg(hw, PHY_CTRL,
4703					       &phy_ctrl)) {
4704				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4705					     MII_CR_RESTART_AUTO_NEG);
4706				e1000_write_phy_reg(hw, PHY_CTRL,
4707						    phy_ctrl);
4708			}
4709		}
4710		return;
4711	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4712		/* If still no link, perhaps using 2/3 pair cable */
4713		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4714		phy_ctrl |= CR_1000T_MS_ENABLE;
4715		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4716		if (!e1000_phy_setup_autoneg(hw) &&
4717		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4718			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4719				     MII_CR_RESTART_AUTO_NEG);
4720			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4721		}
4722	}
4723	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4724	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4725		adapter->smartspeed = 0;
4726}
4727
4728/**
4729 * e1000_ioctl - handle ioctl calls
4730 * @netdev: pointer to our netdev
4731 * @ifr: pointer to interface request structure
4732 * @cmd: ioctl command number
4733 **/
4734static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4735{
4736	switch (cmd) {
4737	case SIOCGMIIPHY:
4738	case SIOCGMIIREG:
4739	case SIOCSMIIREG:
4740		return e1000_mii_ioctl(netdev, ifr, cmd);
4741	default:
4742		return -EOPNOTSUPP;
4743	}
4744}
4745
4746/**
4747 * e1000_mii_ioctl - handle MII ioctl requests
4748 * @netdev: pointer to our netdev
4749 * @ifr: pointer to interface request structure
4750 * @cmd: ioctl command number
4751 **/
4752static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4753			   int cmd)
4754{
4755	struct e1000_adapter *adapter = netdev_priv(netdev);
4756	struct e1000_hw *hw = &adapter->hw;
4757	struct mii_ioctl_data *data = if_mii(ifr);
4758	int retval;
4759	u16 mii_reg;
4760	unsigned long flags;
4761
4762	if (hw->media_type != e1000_media_type_copper)
4763		return -EOPNOTSUPP;
4764
4765	switch (cmd) {
4766	case SIOCGMIIPHY:
4767		data->phy_id = hw->phy_addr;
4768		break;
4769	case SIOCGMIIREG:
4770		spin_lock_irqsave(&adapter->stats_lock, flags);
4771		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4772				   &data->val_out)) {
4773			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4774			return -EIO;
4775		}
4776		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777		break;
4778	case SIOCSMIIREG:
4779		if (data->reg_num & ~(0x1F))
4780			return -EFAULT;
4781		mii_reg = data->val_in;
4782		spin_lock_irqsave(&adapter->stats_lock, flags);
4783		if (e1000_write_phy_reg(hw, data->reg_num,
4784					mii_reg)) {
4785			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4786			return -EIO;
4787		}
4788		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4789		if (hw->media_type == e1000_media_type_copper) {
4790			switch (data->reg_num) {
4791			case PHY_CTRL:
4792				if (mii_reg & MII_CR_POWER_DOWN)
4793					break;
4794				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4795					hw->autoneg = 1;
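					/* 0x2F: advertise 10/100 half+full
					 * plus 1000 full
					 */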
4796					hw->autoneg_advertised = 0x2F;
4797				} else {
4798					u32 speed;
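					/* standard MII BMCR bits:
					 * 0x40 = speed 1000, 0x2000 = speed
					 * 100, 0x100 = full duplex
					 */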
4799					if (mii_reg & 0x40)
4800						speed = SPEED_1000;
4801					else if (mii_reg & 0x2000)
4802						speed = SPEED_100;
4803					else
4804						speed = SPEED_10;
4805					retval = e1000_set_spd_dplx(
4806						adapter, speed,
4807						((mii_reg & 0x100)
4808						 ? DUPLEX_FULL :
4809						 DUPLEX_HALF));
4810					if (retval)
4811						return retval;
4812				}
4813				if (netif_running(adapter->netdev))
4814					e1000_reinit_locked(adapter);
4815				else
4816					e1000_reset(adapter);
4817				break;
4818			case M88E1000_PHY_SPEC_CTRL:
4819			case M88E1000_EXT_PHY_SPEC_CTRL:
4820				if (e1000_phy_reset(hw))
4821					return -EIO;
4822				break;
4823			}
4824		} else {
4825			switch (data->reg_num) {
4826			case PHY_CTRL:
4827				if (mii_reg & MII_CR_POWER_DOWN)
4828					break;
4829				if (netif_running(adapter->netdev))
4830					e1000_reinit_locked(adapter);
4831				else
4832					e1000_reset(adapter);
4833				break;
4834			}
4835		}
4836		break;
4837	default:
4838		return -EOPNOTSUPP;
4839	}
4840	return E1000_SUCCESS;
4841}
4842
4843void e1000_pci_set_mwi(struct e1000_hw *hw)
4844{
4845	struct e1000_adapter *adapter = hw->back;
4846	int ret_val = pci_set_mwi(adapter->pdev);
4847
4848	if (ret_val)
4849		e_err(probe, "Error in setting MWI\n");
4850}
4851
4852void e1000_pci_clear_mwi(struct e1000_hw *hw)
4853{
4854	struct e1000_adapter *adapter = hw->back;
4855
4856	pci_clear_mwi(adapter->pdev);
4857}
4858
4859int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4860{
4861	struct e1000_adapter *adapter = hw->back;
4862	return pcix_get_mmrbc(adapter->pdev);
4863}
4864
4865void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4866{
4867	struct e1000_adapter *adapter = hw->back;
4868	pcix_set_mmrbc(adapter->pdev, mmrbc);
4869}
4870
4871void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4872{
4873	outl(value, port);
4874}
4875
4876static bool e1000_vlan_used(struct e1000_adapter *adapter)
4877{
4878	u16 vid;
4879
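	/* the loop body runs at most once: any set bit means a VLAN is in use */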
4880	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4881		return true;
4882	return false;
4883}
4884
4885static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4886			      netdev_features_t features)
4887{
4888	struct e1000_hw *hw = &adapter->hw;
4889	u32 ctrl;
4890
4891	ctrl = er32(CTRL);
4892	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4893		/* enable VLAN tag insert/strip */
4894		ctrl |= E1000_CTRL_VME;
4895	} else {
4896		/* disable VLAN tag insert/strip */
4897		ctrl &= ~E1000_CTRL_VME;
4898	}
4899	ew32(CTRL, ctrl);
4900}

4901static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4902				     bool filter_on)
4903{
4904	struct e1000_hw *hw = &adapter->hw;
4905	u32 rctl;
4906
4907	if (!test_bit(__E1000_DOWN, &adapter->flags))
4908		e1000_irq_disable(adapter);
4909
4910	__e1000_vlan_mode(adapter, adapter->netdev->features);
4911	if (filter_on) {
4912		/* enable VLAN receive filtering */
4913		rctl = er32(RCTL);
4914		rctl &= ~E1000_RCTL_CFIEN;
4915		if (!(adapter->netdev->flags & IFF_PROMISC))
4916			rctl |= E1000_RCTL_VFE;
4917		ew32(RCTL, rctl);
4918		e1000_update_mng_vlan(adapter);
4919	} else {
4920		/* disable VLAN receive filtering */
4921		rctl = er32(RCTL);
4922		rctl &= ~E1000_RCTL_VFE;
4923		ew32(RCTL, rctl);
4924	}
4925
4926	if (!test_bit(__E1000_DOWN, &adapter->flags))
4927		e1000_irq_enable(adapter);
4928}
4929
4930static void e1000_vlan_mode(struct net_device *netdev,
4931			    netdev_features_t features)
4932{
4933	struct e1000_adapter *adapter = netdev_priv(netdev);
4934
4935	if (!test_bit(__E1000_DOWN, &adapter->flags))
4936		e1000_irq_disable(adapter);
4937
4938	__e1000_vlan_mode(adapter, features);
4939
4940	if (!test_bit(__E1000_DOWN, &adapter->flags))
4941		e1000_irq_enable(adapter);
4942}
4943
4944static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4945				 __be16 proto, u16 vid)
4946{
4947	struct e1000_adapter *adapter = netdev_priv(netdev);
4948	struct e1000_hw *hw = &adapter->hw;
4949	u32 vfta, index;
4950
4951	if ((hw->mng_cookie.status &
4952	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4953	    (vid == adapter->mng_vlan_id))
4954		return 0;
4955
4956	if (!e1000_vlan_used(adapter))
4957		e1000_vlan_filter_on_off(adapter, true);
4958
4959	/* add VID to filter table */
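	/* VFTA has 128 32-bit entries: VID bits [11:5] select the entry,
	 * bits [4:0] the bit within it
	 */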
4960	index = (vid >> 5) & 0x7F;
4961	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4962	vfta |= (1 << (vid & 0x1F));
4963	e1000_write_vfta(hw, index, vfta);
4964
4965	set_bit(vid, adapter->active_vlans);
4966
4967	return 0;
4968}
4969
4970static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4971				  __be16 proto, u16 vid)
4972{
4973	struct e1000_adapter *adapter = netdev_priv(netdev);
4974	struct e1000_hw *hw = &adapter->hw;
4975	u32 vfta, index;
4976
4977	if (!test_bit(__E1000_DOWN, &adapter->flags))
4978		e1000_irq_disable(adapter);
4979
4980	/* remove VID from filter table */
4981	index = (vid >> 5) & 0x7F;
4982	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4983	vfta &= ~(1 << (vid & 0x1F));
4984	e1000_write_vfta(hw, index, vfta);
4985	if (!test_bit(__E1000_DOWN, &adapter->flags))
4986		e1000_irq_enable(adapter);
4987
4988	clear_bit(vid, adapter->active_vlans);
4989
4990	if (!e1000_vlan_used(adapter))
4991		e1000_vlan_filter_on_off(adapter, false);
4992
4993	return 0;
4994}
4995
4996static void e1000_restore_vlan(struct e1000_adapter *adapter)
4997{
4998	u16 vid;
4999
5000	if (!e1000_vlan_used(adapter))
5001		return;
5002
5003	e1000_vlan_filter_on_off(adapter, true);
5004	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5005		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5006}
5007
5008int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5009{
5010	struct e1000_hw *hw = &adapter->hw;
5011
5012	hw->autoneg = 0;
5013
5014	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5015	 * for the switch() below to work
5016	 */
5017	if ((spd & 1) || (dplx & ~1))
5018		goto err_inval;
5019
5020	/* Fiber NICs only allow 1000 Mbps full duplex */
5021	if ((hw->media_type == e1000_media_type_fiber) &&
5022	    (spd != SPEED_1000 || dplx != DUPLEX_FULL))
5023		goto err_inval;
5025
5026	switch (spd + dplx) {
5027	case SPEED_10 + DUPLEX_HALF:
5028		hw->forced_speed_duplex = e1000_10_half;
5029		break;
5030	case SPEED_10 + DUPLEX_FULL:
5031		hw->forced_speed_duplex = e1000_10_full;
5032		break;
5033	case SPEED_100 + DUPLEX_HALF:
5034		hw->forced_speed_duplex = e1000_100_half;
5035		break;
5036	case SPEED_100 + DUPLEX_FULL:
5037		hw->forced_speed_duplex = e1000_100_full;
5038		break;
5039	case SPEED_1000 + DUPLEX_FULL:
5040		hw->autoneg = 1;
5041		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5042		break;
5043	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5044	default:
5045		goto err_inval;
5046	}
5047
5048	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5049	hw->mdix = AUTO_ALL_MODES;
5050
5051	return 0;
5052
5053err_inval:
5054	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5055	return -EINVAL;
5056}
5057
5058static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5059{
5060	struct net_device *netdev = pci_get_drvdata(pdev);
5061	struct e1000_adapter *adapter = netdev_priv(netdev);
5062	struct e1000_hw *hw = &adapter->hw;
5063	u32 ctrl, ctrl_ext, rctl, status;
5064	u32 wufc = adapter->wol;
5065
5066	netif_device_detach(netdev);
5067
5068	if (netif_running(netdev)) {
5069		int count = E1000_CHECK_RESET_COUNT;
5070
5071		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5072			usleep_range(10000, 20000);
5073
5074		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5075		e1000_down(adapter);
5076	}
5077
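	/* link is already up, so wake-on-link-change is unnecessary;
	 * drop LNKC from the wake-up filter
	 */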
5078	status = er32(STATUS);
5079	if (status & E1000_STATUS_LU)
5080		wufc &= ~E1000_WUFC_LNKC;
5081
5082	if (wufc) {
5083		e1000_setup_rctl(adapter);
5084		e1000_set_rx_mode(netdev);
5085
5086		rctl = er32(RCTL);
5087
5088		/* turn on all-multi mode if wake on multicast is enabled */
5089		if (wufc & E1000_WUFC_MC)
5090			rctl |= E1000_RCTL_MPE;
5091
5092		/* enable receives in the hardware */
5093		ew32(RCTL, rctl | E1000_RCTL_EN);
5094
5095		if (hw->mac_type >= e1000_82540) {
5096			ctrl = er32(CTRL);
5097			/* advertise wake from D3Cold */
5098			#define E1000_CTRL_ADVD3WUC 0x00100000
5099			/* phy power management enable */
5100			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5101			ctrl |= E1000_CTRL_ADVD3WUC |
5102				E1000_CTRL_EN_PHY_PWR_MGMT;
5103			ew32(CTRL, ctrl);
5104		}
5105
5106		if (hw->media_type == e1000_media_type_fiber ||
5107		    hw->media_type == e1000_media_type_internal_serdes) {
5108			/* keep the laser running in D3 */
5109			ctrl_ext = er32(CTRL_EXT);
5110			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5111			ew32(CTRL_EXT, ctrl_ext);
5112		}
5113
5114		ew32(WUC, E1000_WUC_PME_EN);
5115		ew32(WUFC, wufc);
5116	} else {
5117		ew32(WUC, 0);
5118		ew32(WUFC, 0);
5119	}
5120
5121	e1000_release_manageability(adapter);
5122
5123	*enable_wake = !!wufc;
5124
5125	/* make sure adapter isn't asleep if manageability is enabled */
5126	if (adapter->en_mng_pt)
5127		*enable_wake = true;
5128
5129	if (netif_running(netdev))
5130		e1000_free_irq(adapter);
5131
5132	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5133		pci_disable_device(pdev);
5134
5135	return 0;
5136}
5137
5138static int __maybe_unused e1000_suspend(struct device *dev)
5139{
5140	int retval;
5141	struct pci_dev *pdev = to_pci_dev(dev);
5142	bool wake;
5143
5144	retval = __e1000_shutdown(pdev, &wake);
5145	device_set_wakeup_enable(dev, wake);
5146
5147	return retval;
5148}
5149
5150static int __maybe_unused e1000_resume(struct device *dev)
5151{
5152	struct pci_dev *pdev = to_pci_dev(dev);
5153	struct net_device *netdev = pci_get_drvdata(pdev);
5154	struct e1000_adapter *adapter = netdev_priv(netdev);
5155	struct e1000_hw *hw = &adapter->hw;
5156	int err;
5157
5158	if (adapter->need_ioport)
5159		err = pci_enable_device(pdev);
5160	else
5161		err = pci_enable_device_mem(pdev);
5162	if (err) {
5163		pr_err("Cannot enable PCI device from suspend\n");
5164		return err;
5165	}
5166
5167	/* flush memory to make sure state is correct */
5168	smp_mb__before_atomic();
5169	clear_bit(__E1000_DISABLED, &adapter->flags);
5170	pci_set_master(pdev);
5171
5172	pci_enable_wake(pdev, PCI_D3hot, 0);
5173	pci_enable_wake(pdev, PCI_D3cold, 0);
5174
5175	if (netif_running(netdev)) {
5176		err = e1000_request_irq(adapter);
5177		if (err)
5178			return err;
5179	}
5180
5181	e1000_power_up_phy(adapter);
5182	e1000_reset(adapter);
5183	ew32(WUS, ~0);
5184
5185	e1000_init_manageability(adapter);
5186
5187	if (netif_running(netdev))
5188		e1000_up(adapter);
5189
5190	netif_device_attach(netdev);
5191
5192	return 0;
5193}
5194
5195static void e1000_shutdown(struct pci_dev *pdev)
5196{
5197	bool wake;
5198
5199	__e1000_shutdown(pdev, &wake);
5200
5201	if (system_state == SYSTEM_POWER_OFF) {
5202		pci_wake_from_d3(pdev, wake);
5203		pci_set_power_state(pdev, PCI_D3hot);
5204	}
5205}
5206
5207#ifdef CONFIG_NET_POLL_CONTROLLER
5208/* Polling 'interrupt' - used by things like netconsole to send skbs
5209 * without having to re-enable interrupts. It's not called while
5210 * the interrupt routine is executing.
5211 */
5212static void e1000_netpoll(struct net_device *netdev)
5213{
5214	struct e1000_adapter *adapter = netdev_priv(netdev);
5215
5216	if (disable_hardirq(adapter->pdev->irq))
5217		e1000_intr(adapter->pdev->irq, netdev);
5218	enable_irq(adapter->pdev->irq);
5219}
5220#endif
5221
5222/**
5223 * e1000_io_error_detected - called when PCI error is detected
5224 * @pdev: Pointer to PCI device
5225 * @state: The current pci connection state
5226 *
5227 * This function is called after a PCI bus error affecting
5228 * this device has been detected.
5229 */
5230static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5231						pci_channel_state_t state)
5232{
5233	struct net_device *netdev = pci_get_drvdata(pdev);
5234	struct e1000_adapter *adapter = netdev_priv(netdev);
5235
5236	netif_device_detach(netdev);
5237
5238	if (state == pci_channel_io_perm_failure)
5239		return PCI_ERS_RESULT_DISCONNECT;
5240
5241	if (netif_running(netdev))
5242		e1000_down(adapter);
5243
5244	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5245		pci_disable_device(pdev);
5246
5247	/* Request a slot reset. */
5248	return PCI_ERS_RESULT_NEED_RESET;
5249}
5250
5251/**
5252 * e1000_io_slot_reset - called after the pci bus has been reset.
5253 * @pdev: Pointer to PCI device
5254 *
5255 * Restart the card from scratch, as if from a cold-boot. Implementation
5256 * resembles the first-half of the e1000_resume routine.
5257 */
5258static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5259{
5260	struct net_device *netdev = pci_get_drvdata(pdev);
5261	struct e1000_adapter *adapter = netdev_priv(netdev);
5262	struct e1000_hw *hw = &adapter->hw;
5263	int err;
5264
5265	if (adapter->need_ioport)
5266		err = pci_enable_device(pdev);
5267	else
5268		err = pci_enable_device_mem(pdev);
5269	if (err) {
5270		pr_err("Cannot re-enable PCI device after reset.\n");
5271		return PCI_ERS_RESULT_DISCONNECT;
5272	}
5273
5274	/* flush memory to make sure state is correct */
5275	smp_mb__before_atomic();
5276	clear_bit(__E1000_DISABLED, &adapter->flags);
5277	pci_set_master(pdev);
5278
5279	pci_enable_wake(pdev, PCI_D3hot, 0);
5280	pci_enable_wake(pdev, PCI_D3cold, 0);
5281
5282	e1000_reset(adapter);
5283	ew32(WUS, ~0);
5284
5285	return PCI_ERS_RESULT_RECOVERED;
5286}
5287
5288/**
5289 * e1000_io_resume - called when traffic can start flowing again.
5290 * @pdev: Pointer to PCI device
5291 *
5292 * This callback is called when the error recovery driver tells us that
5293 * its OK to resume normal operation. Implementation resembles the
5294 * second-half of the e1000_resume routine.
5295 */
5296static void e1000_io_resume(struct pci_dev *pdev)
5297{
5298	struct net_device *netdev = pci_get_drvdata(pdev);
5299	struct e1000_adapter *adapter = netdev_priv(netdev);
5300
5301	e1000_init_manageability(adapter);
5302
5303	if (netif_running(netdev)) {
5304		if (e1000_up(adapter)) {
5305			pr_info("can't bring device back up after reset\n");
5306			return;
5307		}
5308	}
5309
5310	netif_device_attach(netdev);
5311}
5312
5313/* e1000_main.c */
v5.9
/* lines 1-86: identical to the v6.9.4 listing above (SPDX header, includes,
 * PCI device ID table, and forward declarations) */
  87static void e1000_remove(struct pci_dev *pdev);
  88static int e1000_alloc_queues(struct e1000_adapter *adapter);
  89static int e1000_sw_init(struct e1000_adapter *adapter);
  90int e1000_open(struct net_device *netdev);
  91int e1000_close(struct net_device *netdev);
  92static void e1000_configure_tx(struct e1000_adapter *adapter);
  93static void e1000_configure_rx(struct e1000_adapter *adapter);
  94static void e1000_setup_rctl(struct e1000_adapter *adapter);
  95static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
  96static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
  97static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
  98				struct e1000_tx_ring *tx_ring);
  99static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 100				struct e1000_rx_ring *rx_ring);
 101static void e1000_set_rx_mode(struct net_device *netdev);
 102static void e1000_update_phy_info_task(struct work_struct *work);
 103static void e1000_watchdog(struct work_struct *work);
 104static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 105static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 106				    struct net_device *netdev);
 107static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 108static int e1000_set_mac(struct net_device *netdev, void *p);
 109static irqreturn_t e1000_intr(int irq, void *data);
 110static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 111			       struct e1000_tx_ring *tx_ring);
 112static int e1000_clean(struct napi_struct *napi, int budget);
 113static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 114			       struct e1000_rx_ring *rx_ring,
 115			       int *work_done, int work_to_do);
 116static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 117				     struct e1000_rx_ring *rx_ring,
 118				     int *work_done, int work_to_do);
 119static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 120					 struct e1000_rx_ring *rx_ring,
 121					 int cleaned_count)
 122{
 123}
 124static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 125				   struct e1000_rx_ring *rx_ring,
 126				   int cleaned_count);
 127static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 128					 struct e1000_rx_ring *rx_ring,
 129					 int cleaned_count);
 130static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 131static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 132			   int cmd);
 133static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 134static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 135static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
 136static void e1000_reset_task(struct work_struct *work);
 137static void e1000_smartspeed(struct e1000_adapter *adapter);
 138static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 139				       struct sk_buff *skb);
 140
 141static bool e1000_vlan_used(struct e1000_adapter *adapter);
 142static void e1000_vlan_mode(struct net_device *netdev,
 143			    netdev_features_t features);
 144static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 145				     bool filter_on);
 146static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 147				 __be16 proto, u16 vid);
 148static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 149				  __be16 proto, u16 vid);
 150static void e1000_restore_vlan(struct e1000_adapter *adapter);
 151
 152static int __maybe_unused e1000_suspend(struct device *dev);
 153static int __maybe_unused e1000_resume(struct device *dev);
 154static void e1000_shutdown(struct pci_dev *pdev);
 155
 156#ifdef CONFIG_NET_POLL_CONTROLLER
 157/* for netdump / net console */
 158static void e1000_netpoll (struct net_device *netdev);
 159#endif
 160
 161#define COPYBREAK_DEFAULT 256
 162static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 163module_param(copybreak, uint, 0644);
 164MODULE_PARM_DESC(copybreak,
 165	"Maximum size of packet that is copied to a new buffer on receive");
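/* Usage sketch (illustrative values): packets at or below copybreak bytes
 * are copied into a fresh skb so the original DMA buffer can be recycled.
 * The parameter can be set at load time, e.g. "modprobe e1000 copybreak=0"
 * to disable copying, or changed later through
 * /sys/module/e1000/parameters/copybreak (writable because the param is
 * registered with mode 0644).
 */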
 166
 167static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 168						pci_channel_state_t state);
 169static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 170static void e1000_io_resume(struct pci_dev *pdev);
 171
 172static const struct pci_error_handlers e1000_err_handler = {
 173	.error_detected = e1000_io_error_detected,
 174	.slot_reset = e1000_io_slot_reset,
 175	.resume = e1000_io_resume,
 176};
 177
 178static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
 179
 180static struct pci_driver e1000_driver = {
 181	.name     = e1000_driver_name,
 182	.id_table = e1000_pci_tbl,
 183	.probe    = e1000_probe,
 184	.remove   = e1000_remove,
 185	.driver = {
 186		.pm = &e1000_pm_ops,
 187	},
 188	.shutdown = e1000_shutdown,
 189	.err_handler = &e1000_err_handler
 190};
 191
 192MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 193MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 194MODULE_LICENSE("GPL v2");
 195
 196#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 197static int debug = -1;
 198module_param(debug, int, 0);
 199MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 200
 201/**
 202 * e1000_get_hw_dev - return device
 203 * used by hardware layer to print debugging information
 204 *
 205 **/
 206struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 207{
 208	struct e1000_adapter *adapter = hw->back;
 209	return adapter->netdev;
 210}
 211
 212/**
 213 * e1000_init_module - Driver Registration Routine
 214 *
 215 * e1000_init_module is the first routine called when the driver is
 216 * loaded. All it does is register with the PCI subsystem.
 217 **/
 218static int __init e1000_init_module(void)
 219{
 220	int ret;
 221	pr_info("%s\n", e1000_driver_string);
 222
 223	pr_info("%s\n", e1000_copyright);
 224
 225	ret = pci_register_driver(&e1000_driver);
 226	if (copybreak != COPYBREAK_DEFAULT) {
 227		if (copybreak == 0)
 228			pr_info("copybreak disabled\n");
 229		else
 230			pr_info("copybreak enabled for "
 231				   "packets <= %u bytes\n", copybreak);
 232	}
 233	return ret;
 234}
 235
 236module_init(e1000_init_module);
 237
 238/**
 239 * e1000_exit_module - Driver Exit Cleanup Routine
 240 *
 241 * e1000_exit_module is called just before the driver is removed
 242 * from memory.
 243 **/
 244static void __exit e1000_exit_module(void)
 245{
 246	pci_unregister_driver(&e1000_driver);
 247}
 248
 249module_exit(e1000_exit_module);
 250
 251static int e1000_request_irq(struct e1000_adapter *adapter)
 252{
 253	struct net_device *netdev = adapter->netdev;
 254	irq_handler_t handler = e1000_intr;
 255	int irq_flags = IRQF_SHARED;
 256	int err;
 257
 258	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 259			  netdev);
 260	if (err) {
 261		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 262	}
 263
 264	return err;
 265}
 266
 267static void e1000_free_irq(struct e1000_adapter *adapter)
 268{
 269	struct net_device *netdev = adapter->netdev;
 270
 271	free_irq(adapter->pdev->irq, netdev);
 272}
 273
 274/**
 275 * e1000_irq_disable - Mask off interrupt generation on the NIC
 276 * @adapter: board private structure
 277 **/
 278static void e1000_irq_disable(struct e1000_adapter *adapter)
 279{
 280	struct e1000_hw *hw = &adapter->hw;
 281
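	/* Writing all-ones to IMC masks every interrupt cause; the register
	 * read in E1000_WRITE_FLUSH() forces the posted write out to the
	 * device, and synchronize_irq() then waits for any handler that is
	 * already running to finish before the caller proceeds.
	 */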
 282	ew32(IMC, ~0);
 283	E1000_WRITE_FLUSH();
 284	synchronize_irq(adapter->pdev->irq);
 285}
 286
 287/**
 288 * e1000_irq_enable - Enable default interrupt generation settings
 289 * @adapter: board private structure
 290 **/
 291static void e1000_irq_enable(struct e1000_adapter *adapter)
 292{
 293	struct e1000_hw *hw = &adapter->hw;
 294
 295	ew32(IMS, IMS_ENABLE_MASK);
 296	E1000_WRITE_FLUSH();
 297}
 298
 299static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 300{
 301	struct e1000_hw *hw = &adapter->hw;
 302	struct net_device *netdev = adapter->netdev;
 303	u16 vid = hw->mng_cookie.vlan_id;
 304	u16 old_vid = adapter->mng_vlan_id;
 305
 306	if (!e1000_vlan_used(adapter))
 307		return;
 308
 309	if (!test_bit(vid, adapter->active_vlans)) {
 310		if (hw->mng_cookie.status &
 311		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 312			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 313			adapter->mng_vlan_id = vid;
 314		} else {
 315			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 316		}
 317		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 318		    (vid != old_vid) &&
 319		    !test_bit(old_vid, adapter->active_vlans))
 320			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 321					       old_vid);
 322	} else {
 323		adapter->mng_vlan_id = vid;
 324	}
 325}
 326
 327static void e1000_init_manageability(struct e1000_adapter *adapter)
 328{
 329	struct e1000_hw *hw = &adapter->hw;
 330
 331	if (adapter->en_mng_pt) {
 332		u32 manc = er32(MANC);
 333
 334		/* disable hardware interception of ARP */
 335		manc &= ~(E1000_MANC_ARP_EN);
 336
 337		ew32(MANC, manc);
 338	}
 339}
 340
 341static void e1000_release_manageability(struct e1000_adapter *adapter)
 342{
 343	struct e1000_hw *hw = &adapter->hw;
 344
 345	if (adapter->en_mng_pt) {
 346		u32 manc = er32(MANC);
 347
 348		/* re-enable hardware interception of ARP */
 349		manc |= E1000_MANC_ARP_EN;
 350
 351		ew32(MANC, manc);
 352	}
 353}
 354
 355/**
 356 * e1000_configure - configure the hardware for RX and TX
 357 * @adapter: private board structure
 358 **/
 359static void e1000_configure(struct e1000_adapter *adapter)
 360{
 361	struct net_device *netdev = adapter->netdev;
 362	int i;
 363
 364	e1000_set_rx_mode(netdev);
 365
 366	e1000_restore_vlan(adapter);
 367	e1000_init_manageability(adapter);
 368
 369	e1000_configure_tx(adapter);
 370	e1000_setup_rctl(adapter);
 371	e1000_configure_rx(adapter);
 372	/* call E1000_DESC_UNUSED which always leaves
 373	 * at least 1 descriptor unused to make sure
 374	 * next_to_use != next_to_clean
 375	 */
 376	for (i = 0; i < adapter->num_rx_queues; i++) {
 377		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 378		adapter->alloc_rx_buf(adapter, ring,
 379				      E1000_DESC_UNUSED(ring));
 380	}
 381}
 382
 383int e1000_up(struct e1000_adapter *adapter)
 384{
 385	struct e1000_hw *hw = &adapter->hw;
 386
 387	/* hardware has been reset, we need to reload some things */
 388	e1000_configure(adapter);
 389
 390	clear_bit(__E1000_DOWN, &adapter->flags);
 391
 392	napi_enable(&adapter->napi);
 393
 394	e1000_irq_enable(adapter);
 395
 396	netif_wake_queue(adapter->netdev);
 397
 398	/* fire a link change interrupt to start the watchdog */
 399	ew32(ICS, E1000_ICS_LSC);
 400	return 0;
 401}
 402
 403/**
 404 * e1000_power_up_phy - restore link in case the phy was powered down
 405 * @adapter: address of board private structure
 406 *
 407 * The phy may be powered down to save power and turn off link when the
 408 * driver is unloaded and wake on lan is not enabled (among others)
 409 * *** this routine MUST be followed by a call to e1000_reset ***
 410 **/
 411void e1000_power_up_phy(struct e1000_adapter *adapter)
 412{
 413	struct e1000_hw *hw = &adapter->hw;
 414	u16 mii_reg = 0;
 415
 416	/* Just clear the power down bit to wake the phy back up */
 417	if (hw->media_type == e1000_media_type_copper) {
 418		/* according to the manual, the phy will retain its
 419		 * settings across a power-down/up cycle
 420		 */
 421		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 422		mii_reg &= ~MII_CR_POWER_DOWN;
 423		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 424	}
 425}
 426
 427static void e1000_power_down_phy(struct e1000_adapter *adapter)
 428{
 429	struct e1000_hw *hw = &adapter->hw;
 430
 431	/* Power down the PHY so no link is implied when interface is down.
 432	 * The PHY cannot be powered down if any of the following is true:
 433	 * (a) WoL is enabled
 434	 * (b) AMT is active
 435	 * (c) SoL/IDER session is active
 436	 */
 437	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 438	   hw->media_type == e1000_media_type_copper) {
 439		u16 mii_reg = 0;
 440
 441		switch (hw->mac_type) {
 442		case e1000_82540:
 443		case e1000_82545:
 444		case e1000_82545_rev_3:
 445		case e1000_82546:
 446		case e1000_ce4100:
 447		case e1000_82546_rev_3:
 448		case e1000_82541:
 449		case e1000_82541_rev_2:
 450		case e1000_82547:
 451		case e1000_82547_rev_2:
 452			if (er32(MANC) & E1000_MANC_SMBUS_EN)
 453				goto out;
 454			break;
 455		default:
 456			goto out;
 457		}
 458		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 459		mii_reg |= MII_CR_POWER_DOWN;
 460		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 461		msleep(1);
 462	}
 463out:
 464	return;
 465}
 466
 467static void e1000_down_and_stop(struct e1000_adapter *adapter)
 468{
 469	set_bit(__E1000_DOWN, &adapter->flags);
 470
 471	cancel_delayed_work_sync(&adapter->watchdog_task);
 472
 473	/*
 474	 * Since the watchdog task can reschedule other tasks, we should cancel
 475	 * it first, otherwise we can run into the situation when a work is
 476	 * still running after the adapter has been turned down.
 477	 */
 478
 479	cancel_delayed_work_sync(&adapter->phy_info_task);
 480	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 481
 482	/* Only kill reset task if adapter is not resetting */
 483	if (!test_bit(__E1000_RESETTING, &adapter->flags))
 484		cancel_work_sync(&adapter->reset_task);
 485}
 486
 487void e1000_down(struct e1000_adapter *adapter)
 488{
 489	struct e1000_hw *hw = &adapter->hw;
 490	struct net_device *netdev = adapter->netdev;
 491	u32 rctl, tctl;
 492
 493	/* disable receives in the hardware */
 494	rctl = er32(RCTL);
 495	ew32(RCTL, rctl & ~E1000_RCTL_EN);
 496	/* flush and sleep below */
 497
 498	netif_tx_disable(netdev);
 499
 500	/* disable transmits in the hardware */
 501	tctl = er32(TCTL);
 502	tctl &= ~E1000_TCTL_EN;
 503	ew32(TCTL, tctl);
 504	/* flush both disables and wait for them to finish */
 505	E1000_WRITE_FLUSH();
 506	msleep(10);
 507
 508	/* Set the carrier off after transmits have been disabled in the
 509	 * hardware, to avoid race conditions with e1000_watchdog() (which
 510	 * may be running concurrently to us, checking for the carrier
 511	 * bit to decide whether it should enable transmits again). Such
 512	 * a race condition would result in transmission being disabled
 513	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 514	 */
 515	netif_carrier_off(netdev);
 516
 517	napi_disable(&adapter->napi);
 518
 519	e1000_irq_disable(adapter);
 520
 521	/* Setting DOWN must be after irq_disable to prevent
 522	 * a screaming interrupt.  Setting DOWN also prevents
 523	 * tasks from rescheduling.
 524	 */
 525	e1000_down_and_stop(adapter);
 526
 527	adapter->link_speed = 0;
 528	adapter->link_duplex = 0;
 529
 530	e1000_reset(adapter);
 531	e1000_clean_all_tx_rings(adapter);
 532	e1000_clean_all_rx_rings(adapter);
 533}
 534
 535void e1000_reinit_locked(struct e1000_adapter *adapter)
 536{
 537	WARN_ON(in_interrupt());
 538	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 539		msleep(1);
 540
 541	/* only run the task if not already down */
 542	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
 543		e1000_down(adapter);
 544		e1000_up(adapter);
 545	}
 546
 547	clear_bit(__E1000_RESETTING, &adapter->flags);
 548}
 549
 550void e1000_reset(struct e1000_adapter *adapter)
 551{
 552	struct e1000_hw *hw = &adapter->hw;
 553	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 554	bool legacy_pba_adjust = false;
 555	u16 hwm;
 556
 557	/* Repartition the PBA for MTUs greater than 9k.
 558	 * A CTRL.RST is required for the change to take effect.
 559	 */
 560
 561	switch (hw->mac_type) {
 562	case e1000_82542_rev2_0:
 563	case e1000_82542_rev2_1:
 564	case e1000_82543:
 565	case e1000_82544:
 566	case e1000_82540:
 567	case e1000_82541:
 568	case e1000_82541_rev_2:
 569		legacy_pba_adjust = true;
 570		pba = E1000_PBA_48K;
 571		break;
 572	case e1000_82545:
 573	case e1000_82545_rev_3:
 574	case e1000_82546:
 575	case e1000_ce4100:
 576	case e1000_82546_rev_3:
 577		pba = E1000_PBA_48K;
 578		break;
 579	case e1000_82547:
 580	case e1000_82547_rev_2:
 581		legacy_pba_adjust = true;
 582		pba = E1000_PBA_30K;
 583		break;
 584	case e1000_undefined:
 585	case e1000_num_macs:
 586		break;
 587	}
 588
 589	if (legacy_pba_adjust) {
 590		if (hw->max_frame_size > E1000_RXBUFFER_8192)
 591			pba -= 8; /* allocate more FIFO for Tx */
 592
 593		if (hw->mac_type == e1000_82547) {
 594			adapter->tx_fifo_head = 0;
 595			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 596			adapter->tx_fifo_size =
 597				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 598			atomic_set(&adapter->tx_fifo_stall, 0);
 599		}
 600	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
 601		/* adjust PBA for jumbo frames */
 602		ew32(PBA, pba);
 603
 604		/* To maintain wire speed transmits, the Tx FIFO should be
 605		 * large enough to accommodate two full transmit packets,
 606		 * rounded up to the next 1KB and expressed in KB.  Likewise,
 607		 * the Rx FIFO should be large enough to accommodate at least
 608		 * one full receive packet and is similarly rounded up and
 609		 * expressed in KB.
 610		 */
 611		pba = er32(PBA);
 612		/* upper 16 bits has Tx packet buffer allocation size in KB */
 613		tx_space = pba >> 16;
 614		/* lower 16 bits has Rx packet buffer allocation size in KB */
 615		pba &= 0xffff;
 616		/* the Tx fifo also stores 16 bytes of information about the Tx
 617		 * but don't include ethernet FCS because hardware appends it
 618		 */
 619		min_tx_space = (hw->max_frame_size +
 620				sizeof(struct e1000_tx_desc) -
 621				ETH_FCS_LEN) * 2;
 622		min_tx_space = ALIGN(min_tx_space, 1024);
 623		min_tx_space >>= 10;
 624		/* software strips receive CRC, so leave room for it */
 625		min_rx_space = hw->max_frame_size;
 626		min_rx_space = ALIGN(min_rx_space, 1024);
 627		min_rx_space >>= 10;
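		/* Worked example (hypothetical 9018-byte jumbo frame,
		 * 16-byte descriptor): min_tx_space = (9018 + 16 - 4) * 2
		 * = 18060, rounded up to 18432 bytes = 18 KB;
		 * min_rx_space = 9018, rounded up to 9216 bytes = 9 KB.
		 */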
 628
 629		/* If current Tx allocation is less than the min Tx FIFO size,
 630		 * and the min Tx FIFO size is less than the current Rx FIFO
 631		 * allocation, take space away from current Rx allocation
 632		 */
 633		if (tx_space < min_tx_space &&
 634		    ((min_tx_space - tx_space) < pba)) {
 635			pba = pba - (min_tx_space - tx_space);
 636
 637			/* PCI/PCIx hardware has PBA alignment constraints */
 638			switch (hw->mac_type) {
 639			case e1000_82545 ... e1000_82546_rev_3:
 640				pba &= ~(E1000_PBA_8K - 1);
 641				break;
 642			default:
 643				break;
 644			}
 645
 646			/* if short on Rx space, Rx wins and must trump Tx
 647			 * adjustment or use Early Receive if available
 648			 */
 649			if (pba < min_rx_space)
 650				pba = min_rx_space;
 651		}
 652	}
 653
 654	ew32(PBA, pba);
 655
 656	/* flow control settings:
 657	 * The high water mark must be low enough to fit one full frame
 658	 * (or the size used for early receive) above it in the Rx FIFO.
 659	 * Set it to the lower of:
 660	 * - 90% of the Rx FIFO size, and
 661	 * - the full Rx FIFO size minus the early receive size (for parts
 662	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 663	 * - the full Rx FIFO size minus one full frame
 664	 */
 665	hwm = min(((pba << 10) * 9 / 10),
 666		  ((pba << 10) - hw->max_frame_size));
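	/* e.g. (illustrative numbers) with pba = 48 KB and a 1522-byte max
	 * frame: min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630)
	 * = 44236, masked to 8-byte granularity below -> 44232 (0xACC8).
	 */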
 667
 668	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
 669	hw->fc_low_water = hw->fc_high_water - 8;
 670	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 671	hw->fc_send_xon = 1;
 672	hw->fc = hw->original_fc;
 673
 674	/* Allow time for pending master requests to run */
 675	e1000_reset_hw(hw);
 676	if (hw->mac_type >= e1000_82544)
 677		ew32(WUC, 0);
 678
 679	if (e1000_init_hw(hw))
 680		e_dev_err("Hardware Error\n");
 681	e1000_update_mng_vlan(adapter);
 682
 683	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 684	if (hw->mac_type >= e1000_82544 &&
 685	    hw->autoneg == 1 &&
 686	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 687		u32 ctrl = er32(CTRL);
 688		/* clear phy power management bit if we are in gig only mode,
 689		 * which if enabled will attempt negotiation to 100Mb, which
 690		 * can cause a loss of link at power off or driver unload
 691		 */
 692		ctrl &= ~E1000_CTRL_SWDPIN3;
 693		ew32(CTRL, ctrl);
 694	}
 695
 696	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 697	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 698
 699	e1000_reset_adaptive(hw);
 700	e1000_phy_get_info(hw, &adapter->phy_info);
 701
 702	e1000_release_manageability(adapter);
 703}
 704
 705/* Dump the eeprom for users having checksum issues */
 706static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 707{
 708	struct net_device *netdev = adapter->netdev;
 709	struct ethtool_eeprom eeprom;
 710	const struct ethtool_ops *ops = netdev->ethtool_ops;
 711	u8 *data;
 712	int i;
 713	u16 csum_old, csum_new = 0;
 714
 715	eeprom.len = ops->get_eeprom_len(netdev);
 716	eeprom.offset = 0;
 717
 718	data = kmalloc(eeprom.len, GFP_KERNEL);
 719	if (!data)
 720		return;
 721
 722	ops->get_eeprom(netdev, &eeprom, data);
 723
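	/* The NVM stores 16-bit words little-endian; all words from offset
	 * 0x00 through the checksum word at EEPROM_CHECKSUM_REG must sum to
	 * EEPROM_SUM, so the expected checksum is EEPROM_SUM minus the sum
	 * of the preceding words.
	 */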
 724	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 725		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 726	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 727		csum_new += data[i] + (data[i + 1] << 8);
 728	csum_new = EEPROM_SUM - csum_new;
 729
 730	pr_err("/*********************/\n");
 731	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 732	pr_err("Calculated              : 0x%04x\n", csum_new);
 733
 734	pr_err("Offset    Values\n");
 735	pr_err("========  ======\n");
 736	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 737
 738	pr_err("Include this output when contacting your support provider.\n");
 739	pr_err("This is not a software error! Something bad happened to\n");
 740	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 741	pr_err("result in further problems, possibly loss of data,\n");
 742	pr_err("corruption or system hangs!\n");
 743	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 744	pr_err("which is invalid and requires you to set the proper MAC\n");
 745	pr_err("address manually before continuing to enable this network\n");
 746	pr_err("device. Please inspect the EEPROM dump and report the\n");
 747	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 748	pr_err("/*********************/\n");
 749
 750	kfree(data);
 751}
 752
 753/**
 754 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 755 * @pdev: PCI device information struct
 756 *
 757 * Return true if an adapter needs ioport resources
 758 **/
 759static int e1000_is_need_ioport(struct pci_dev *pdev)
 760{
 761	switch (pdev->device) {
 762	case E1000_DEV_ID_82540EM:
 763	case E1000_DEV_ID_82540EM_LOM:
 764	case E1000_DEV_ID_82540EP:
 765	case E1000_DEV_ID_82540EP_LOM:
 766	case E1000_DEV_ID_82540EP_LP:
 767	case E1000_DEV_ID_82541EI:
 768	case E1000_DEV_ID_82541EI_MOBILE:
 769	case E1000_DEV_ID_82541ER:
 770	case E1000_DEV_ID_82541ER_LOM:
 771	case E1000_DEV_ID_82541GI:
 772	case E1000_DEV_ID_82541GI_LF:
 773	case E1000_DEV_ID_82541GI_MOBILE:
 774	case E1000_DEV_ID_82544EI_COPPER:
 775	case E1000_DEV_ID_82544EI_FIBER:
 776	case E1000_DEV_ID_82544GC_COPPER:
 777	case E1000_DEV_ID_82544GC_LOM:
 778	case E1000_DEV_ID_82545EM_COPPER:
 779	case E1000_DEV_ID_82545EM_FIBER:
 780	case E1000_DEV_ID_82546EB_COPPER:
 781	case E1000_DEV_ID_82546EB_FIBER:
 782	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 783		return true;
 784	default:
 785		return false;
 786	}
 787}
 788
 789static netdev_features_t e1000_fix_features(struct net_device *netdev,
 790	netdev_features_t features)
 791{
 792	/* Since there is no support for separate Rx/Tx vlan accel
 793	 * enable/disable make sure Tx flag is always in same state as Rx.
 794	 */
 795	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 796		features |= NETIF_F_HW_VLAN_CTAG_TX;
 797	else
 798		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 799
 800	return features;
 801}
 802
 803static int e1000_set_features(struct net_device *netdev,
 804	netdev_features_t features)
 805{
 806	struct e1000_adapter *adapter = netdev_priv(netdev);
 807	netdev_features_t changed = features ^ netdev->features;
 808
 809	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 810		e1000_vlan_mode(netdev, features);
 811
 812	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 813		return 0;
 814
 815	netdev->features = features;
 816	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 817
 818	if (netif_running(netdev))
 819		e1000_reinit_locked(adapter);
 820	else
 821		e1000_reset(adapter);
 822
 823	return 1;
 824}
 825
 826static const struct net_device_ops e1000_netdev_ops = {
 827	.ndo_open		= e1000_open,
 828	.ndo_stop		= e1000_close,
 829	.ndo_start_xmit		= e1000_xmit_frame,
 830	.ndo_set_rx_mode	= e1000_set_rx_mode,
 831	.ndo_set_mac_address	= e1000_set_mac,
 832	.ndo_tx_timeout		= e1000_tx_timeout,
 833	.ndo_change_mtu		= e1000_change_mtu,
 834	.ndo_do_ioctl		= e1000_ioctl,
 835	.ndo_validate_addr	= eth_validate_addr,
 836	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
 837	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
 838#ifdef CONFIG_NET_POLL_CONTROLLER
 839	.ndo_poll_controller	= e1000_netpoll,
 840#endif
 841	.ndo_fix_features	= e1000_fix_features,
 842	.ndo_set_features	= e1000_set_features,
 843};
 844
 845/**
 846 * e1000_init_hw_struct - initialize members of hw struct
 847 * @adapter: board private struct
 848 * @hw: structure used by e1000_hw.c
 849 *
 850 * Factors out initialization of the e1000_hw struct to its own function
 851 * that can be called very early at init (just after struct allocation).
 852 * Fields are initialized based on PCI device information and
 853 * OS network device settings (MTU size).
 854 * Returns negative error codes if MAC type setup fails.
 855 */
 856static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 857				struct e1000_hw *hw)
 858{
 859	struct pci_dev *pdev = adapter->pdev;
 860
 861	/* PCI config space info */
 862	hw->vendor_id = pdev->vendor;
 863	hw->device_id = pdev->device;
 864	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 865	hw->subsystem_id = pdev->subsystem_device;
 866	hw->revision_id = pdev->revision;
 867
 868	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 869
 870	hw->max_frame_size = adapter->netdev->mtu +
 871			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 872	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 873
 874	/* identify the MAC */
 875	if (e1000_set_mac_type(hw)) {
 876		e_err(probe, "Unknown MAC Type\n");
 877		return -EIO;
 878	}
 879
 880	switch (hw->mac_type) {
 881	default:
 882		break;
 883	case e1000_82541:
 884	case e1000_82547:
 885	case e1000_82541_rev_2:
 886	case e1000_82547_rev_2:
 887		hw->phy_init_script = 1;
 888		break;
 889	}
 890
 891	e1000_set_media_type(hw);
 892	e1000_get_bus_info(hw);
 893
 894	hw->wait_autoneg_complete = false;
 895	hw->tbi_compatibility_en = true;
 896	hw->adaptive_ifs = true;
 897
 898	/* Copper options */
 899
 900	if (hw->media_type == e1000_media_type_copper) {
 901		hw->mdix = AUTO_ALL_MODES;
 902		hw->disable_polarity_correction = false;
 903		hw->master_slave = E1000_MASTER_SLAVE;
 904	}
 905
 906	return 0;
 907}
 908
 909/**
 910 * e1000_probe - Device Initialization Routine
 911 * @pdev: PCI device information struct
 912 * @ent: entry in e1000_pci_tbl
 913 *
 914 * Returns 0 on success, negative on failure
 915 *
 916 * e1000_probe initializes an adapter identified by a pci_dev structure.
 917 * The OS initialization, configuring of the adapter private structure,
 918 * and a hardware reset occur.
 919 **/
 920static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 921{
 922	struct net_device *netdev;
 923	struct e1000_adapter *adapter = NULL;
 924	struct e1000_hw *hw;
 925
 926	static int cards_found;
 927	static int global_quad_port_a; /* global ksp3 port a indication */
 928	int i, err, pci_using_dac;
 929	u16 eeprom_data = 0;
 930	u16 tmp = 0;
 931	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 932	int bars, need_ioport;
 933	bool disable_dev = false;
 934
 935	/* do not allocate ioport bars when not needed */
 936	need_ioport = e1000_is_need_ioport(pdev);
 937	if (need_ioport) {
 938		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 939		err = pci_enable_device(pdev);
 940	} else {
 941		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 942		err = pci_enable_device_mem(pdev);
 943	}
 944	if (err)
 945		return err;
 946
 947	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 948	if (err)
 949		goto err_pci_reg;
 950
 951	pci_set_master(pdev);
 952	err = pci_save_state(pdev);
 953	if (err)
 954		goto err_alloc_etherdev;
 955
 956	err = -ENOMEM;
 957	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 958	if (!netdev)
 959		goto err_alloc_etherdev;
 960
 961	SET_NETDEV_DEV(netdev, &pdev->dev);
 962
 963	pci_set_drvdata(pdev, netdev);
 964	adapter = netdev_priv(netdev);
 965	adapter->netdev = netdev;
 966	adapter->pdev = pdev;
 967	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 968	adapter->bars = bars;
 969	adapter->need_ioport = need_ioport;
 970
 971	hw = &adapter->hw;
 972	hw->back = adapter;
 973
 974	err = -EIO;
 975	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 976	if (!hw->hw_addr)
 977		goto err_ioremap;
 978
 979	if (adapter->need_ioport) {
 980		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
 981			if (pci_resource_len(pdev, i) == 0)
 982				continue;
 983			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 984				hw->io_base = pci_resource_start(pdev, i);
 985				break;
 986			}
 987		}
 988	}
 989
 990	/* make ready for any if (hw->...) below */
 991	err = e1000_init_hw_struct(adapter, hw);
 992	if (err)
 993		goto err_sw_init;
 994
 995	/* there is a workaround being applied below that limits
 996	 * 64-bit DMA addresses to 64-bit hardware.  There are some
 997	 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
 998	 */
 999	pci_using_dac = 0;
1000	if ((hw->bus_type == e1000_bus_type_pcix) &&
1001	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1002		pci_using_dac = 1;
1003	} else {
1004		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1005		if (err) {
1006			pr_err("No usable DMA config, aborting\n");
1007			goto err_dma;
1008		}
1009	}
1010
1011	netdev->netdev_ops = &e1000_netdev_ops;
1012	e1000_set_ethtool_ops(netdev);
1013	netdev->watchdog_timeo = 5 * HZ;
1014	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1015
1016	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1017
1018	adapter->bd_number = cards_found;
1019
1020	/* setup the private structure */
1021
1022	err = e1000_sw_init(adapter);
1023	if (err)
1024		goto err_sw_init;
1025
1026	err = -EIO;
1027	if (hw->mac_type == e1000_ce4100) {
1028		hw->ce4100_gbe_mdio_base_virt =
1029					ioremap(pci_resource_start(pdev, BAR_1),
1030						pci_resource_len(pdev, BAR_1));
1031
1032		if (!hw->ce4100_gbe_mdio_base_virt)
1033			goto err_mdio_ioremap;
1034	}
1035
1036	if (hw->mac_type >= e1000_82543) {
1037		netdev->hw_features = NETIF_F_SG |
1038				   NETIF_F_HW_CSUM |
1039				   NETIF_F_HW_VLAN_CTAG_RX;
1040		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1041				   NETIF_F_HW_VLAN_CTAG_FILTER;
1042	}
1043
1044	if ((hw->mac_type >= e1000_82544) &&
1045	   (hw->mac_type != e1000_82547))
1046		netdev->hw_features |= NETIF_F_TSO;
1047
1048	netdev->priv_flags |= IFF_SUPP_NOFCS;
1049
1050	netdev->features |= netdev->hw_features;
1051	netdev->hw_features |= (NETIF_F_RXCSUM |
1052				NETIF_F_RXALL |
1053				NETIF_F_RXFCS);
1054
1055	if (pci_using_dac) {
1056		netdev->features |= NETIF_F_HIGHDMA;
1057		netdev->vlan_features |= NETIF_F_HIGHDMA;
1058	}
1059
1060	netdev->vlan_features |= (NETIF_F_TSO |
1061				  NETIF_F_HW_CSUM |
1062				  NETIF_F_SG);
1063
1064	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1065	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1066	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1067		netdev->priv_flags |= IFF_UNICAST_FLT;
1068
1069	/* MTU range: 46 - 16110 */
1070	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1071	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1072
1073	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1074
1075	/* initialize eeprom parameters */
1076	if (e1000_init_eeprom_params(hw)) {
1077		e_err(probe, "EEPROM initialization failed\n");
1078		goto err_eeprom;
1079	}
1080
1081	/* before reading the EEPROM, reset the controller to
1082	 * put the device in a known good starting state
1083	 */
1084
1085	e1000_reset_hw(hw);
1086
1087	/* make sure the EEPROM is good */
1088	if (e1000_validate_eeprom_checksum(hw) < 0) {
1089		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1090		e1000_dump_eeprom(adapter);
1091		/* set MAC address to all zeroes to invalidate and temporarily
1092		 * disable this device for the user. This blocks regular
1093		 * traffic while still permitting ethtool ioctls from reaching
1094		 * the hardware as well as allowing the user to run the
1095		 * interface after manually setting a hw addr using
1096		 * `ip link set address`
1097		 */
1098		memset(hw->mac_addr, 0, netdev->addr_len);
1099	} else {
1100		/* copy the MAC address out of the EEPROM */
1101		if (e1000_read_mac_addr(hw))
1102			e_err(probe, "EEPROM Read Error\n");
1103	}
1104	/* don't block initialization here due to bad MAC address */
1105	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1106
1107	if (!is_valid_ether_addr(netdev->dev_addr))
1108		e_err(probe, "Invalid MAC Address\n");
1109
1110
1111	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1112	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1113			  e1000_82547_tx_fifo_stall_task);
1114	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1115	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1116
1117	e1000_check_options(adapter);
1118
1119	/* Initial Wake on LAN setting
1120	 * If APM wake is enabled in the EEPROM,
1121	 * enable the ACPI Magic Packet filter
1122	 */
1123
1124	switch (hw->mac_type) {
1125	case e1000_82542_rev2_0:
1126	case e1000_82542_rev2_1:
1127	case e1000_82543:
1128		break;
1129	case e1000_82544:
1130		e1000_read_eeprom(hw,
1131			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1132		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1133		break;
1134	case e1000_82546:
1135	case e1000_82546_rev_3:
1136		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1137			e1000_read_eeprom(hw,
1138				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1139			break;
1140		}
1141		fallthrough;
1142	default:
1143		e1000_read_eeprom(hw,
1144			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1145		break;
1146	}
1147	if (eeprom_data & eeprom_apme_mask)
1148		adapter->eeprom_wol |= E1000_WUFC_MAG;
1149
1150	/* now that we have the eeprom settings, apply the special cases
1151	 * where the eeprom may be wrong or the board simply won't support
1152	 * wake on lan on a particular port
1153	 */
1154	switch (pdev->device) {
1155	case E1000_DEV_ID_82546GB_PCIE:
1156		adapter->eeprom_wol = 0;
1157		break;
1158	case E1000_DEV_ID_82546EB_FIBER:
1159	case E1000_DEV_ID_82546GB_FIBER:
1160		/* Wake events only supported on port A for dual fiber
1161		 * regardless of eeprom setting
1162		 */
1163		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1164			adapter->eeprom_wol = 0;
1165		break;
1166	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1167		/* if quad port adapter, disable WoL on all but port A */
1168		if (global_quad_port_a != 0)
1169			adapter->eeprom_wol = 0;
1170		else
1171			adapter->quad_port_a = true;
1172		/* Reset for multiple quad port adapters */
1173		if (++global_quad_port_a == 4)
1174			global_quad_port_a = 0;
1175		break;
1176	}
1177
1178	/* initialize the wol settings based on the eeprom settings */
1179	adapter->wol = adapter->eeprom_wol;
1180	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1181
1182	/* Auto detect PHY address */
1183	if (hw->mac_type == e1000_ce4100) {
1184		for (i = 0; i < 32; i++) {
1185			hw->phy_addr = i;
1186			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1187
1188			if (tmp != 0 && tmp != 0xFF)
1189				break;
1190		}
1191
1192		if (i >= 32)
1193			goto err_eeprom;
1194	}
1195
1196	/* reset the hardware with the new settings */
1197	e1000_reset(adapter);
1198
1199	strcpy(netdev->name, "eth%d");
1200	err = register_netdev(netdev);
1201	if (err)
1202		goto err_register;
1203
1204	e1000_vlan_filter_on_off(adapter, false);
1205
1206	/* print bus type/speed/width info */
1207	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1208	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1209	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1210		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1211		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1212		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1213	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1214	       netdev->dev_addr);
1215
1216	/* carrier off reporting is important to ethtool even BEFORE open */
1217	netif_carrier_off(netdev);
1218
1219	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1220
1221	cards_found++;
1222	return 0;
1223
1224err_register:
1225err_eeprom:
1226	e1000_phy_hw_reset(hw);
1227
1228	if (hw->flash_address)
1229		iounmap(hw->flash_address);
1230	kfree(adapter->tx_ring);
1231	kfree(adapter->rx_ring);
1232err_dma:
1233err_sw_init:
1234err_mdio_ioremap:
1235	iounmap(hw->ce4100_gbe_mdio_base_virt);
1236	iounmap(hw->hw_addr);
1237err_ioremap:
1238	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1239	free_netdev(netdev);
1240err_alloc_etherdev:
1241	pci_release_selected_regions(pdev, bars);
1242err_pci_reg:
1243	if (!adapter || disable_dev)
1244		pci_disable_device(pdev);
1245	return err;
1246}
1247
1248/**
1249 * e1000_remove - Device Removal Routine
1250 * @pdev: PCI device information struct
1251 *
1252 * e1000_remove is called by the PCI subsystem to alert the driver
1253 * that it should release a PCI device. That could be caused by a
1254 * Hot-Plug event, or because the driver is going to be removed from
1255 * memory.
1256 **/
1257static void e1000_remove(struct pci_dev *pdev)
1258{
1259	struct net_device *netdev = pci_get_drvdata(pdev);
1260	struct e1000_adapter *adapter = netdev_priv(netdev);
1261	struct e1000_hw *hw = &adapter->hw;
1262	bool disable_dev;
1263
1264	e1000_down_and_stop(adapter);
1265	e1000_release_manageability(adapter);
1266
1267	unregister_netdev(netdev);
1268
1269	e1000_phy_hw_reset(hw);
1270
1271	kfree(adapter->tx_ring);
1272	kfree(adapter->rx_ring);
1273
1274	if (hw->mac_type == e1000_ce4100)
1275		iounmap(hw->ce4100_gbe_mdio_base_virt);
1276	iounmap(hw->hw_addr);
1277	if (hw->flash_address)
1278		iounmap(hw->flash_address);
1279	pci_release_selected_regions(pdev, adapter->bars);
1280
1281	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1282	free_netdev(netdev);
1283
1284	if (disable_dev)
1285		pci_disable_device(pdev);
1286}
1287
1288/**
1289 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1290 * @adapter: board private structure to initialize
1291 *
1292 * e1000_sw_init initializes the Adapter private data structure.
1293 * e1000_init_hw_struct MUST be called before this function
1294 **/
1295static int e1000_sw_init(struct e1000_adapter *adapter)
1296{
1297	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1298
1299	adapter->num_tx_queues = 1;
1300	adapter->num_rx_queues = 1;
1301
1302	if (e1000_alloc_queues(adapter)) {
1303		e_err(probe, "Unable to allocate memory for queues\n");
1304		return -ENOMEM;
1305	}
1306
1307	/* Explicitly disable IRQ since the NIC can be in any state. */
1308	e1000_irq_disable(adapter);
1309
1310	spin_lock_init(&adapter->stats_lock);
1311
1312	set_bit(__E1000_DOWN, &adapter->flags);
1313
1314	return 0;
1315}
1316
1317/**
1318 * e1000_alloc_queues - Allocate memory for all rings
1319 * @adapter: board private structure to initialize
1320 *
1321 * We allocate one ring per queue at run-time since we don't know the
1322 * number of queues at compile-time.
1323 **/
1324static int e1000_alloc_queues(struct e1000_adapter *adapter)
1325{
1326	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1327				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1328	if (!adapter->tx_ring)
1329		return -ENOMEM;
1330
1331	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1332				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1333	if (!adapter->rx_ring) {
1334		kfree(adapter->tx_ring);
1335		return -ENOMEM;
1336	}
1337
1338	return E1000_SUCCESS;
1339}
1340
1341/**
1342 * e1000_open - Called when a network interface is made active
1343 * @netdev: network interface device structure
1344 *
1345 * Returns 0 on success, negative value on failure
1346 *
1347 * The open entry point is called when a network interface is made
1348 * active by the system (IFF_UP).  At this point all resources needed
1349 * for transmit and receive operations are allocated, the interrupt
1350 * handler is registered with the OS, the watchdog task is started,
1351 * and the stack is notified that the interface is ready.
1352 **/
1353int e1000_open(struct net_device *netdev)
1354{
1355	struct e1000_adapter *adapter = netdev_priv(netdev);
1356	struct e1000_hw *hw = &adapter->hw;
1357	int err;
1358
1359	/* disallow open during test */
1360	if (test_bit(__E1000_TESTING, &adapter->flags))
1361		return -EBUSY;
1362
1363	netif_carrier_off(netdev);
1364
1365	/* allocate transmit descriptors */
1366	err = e1000_setup_all_tx_resources(adapter);
1367	if (err)
1368		goto err_setup_tx;
1369
1370	/* allocate receive descriptors */
1371	err = e1000_setup_all_rx_resources(adapter);
1372	if (err)
1373		goto err_setup_rx;
1374
1375	e1000_power_up_phy(adapter);
1376
1377	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1378	if ((hw->mng_cookie.status &
1379			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1380		e1000_update_mng_vlan(adapter);
1381	}
1382
1383	/* before we allocate an interrupt, we must be ready to handle it.
1384	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1385	 * as soon as we call request_irq, so we have to set up our
1386	 * clean_rx handler before we do so.
1387	 */
1388	e1000_configure(adapter);
1389
1390	err = e1000_request_irq(adapter);
1391	if (err)
1392		goto err_req_irq;
1393
1394	/* From here on the code is the same as e1000_up() */
1395	clear_bit(__E1000_DOWN, &adapter->flags);
1396
1397	napi_enable(&adapter->napi);
1398
1399	e1000_irq_enable(adapter);
1400
1401	netif_start_queue(netdev);
1402
1403	/* fire a link status change interrupt to start the watchdog */
1404	ew32(ICS, E1000_ICS_LSC);
1405
1406	return E1000_SUCCESS;
1407
1408err_req_irq:
1409	e1000_power_down_phy(adapter);
1410	e1000_free_all_rx_resources(adapter);
1411err_setup_rx:
1412	e1000_free_all_tx_resources(adapter);
1413err_setup_tx:
1414	e1000_reset(adapter);
1415
1416	return err;
1417}
1418
1419/**
1420 * e1000_close - Disables a network interface
1421 * @netdev: network interface device structure
1422 *
1423 * Returns 0, this is not allowed to fail
1424 *
1425 * The close entry point is called when an interface is de-activated
1426 * by the OS.  The hardware is still under the drivers control, but
1427 * needs to be disabled.  A global MAC reset is issued to stop the
1428 * hardware, and all transmit and receive resources are freed.
1429 **/
1430int e1000_close(struct net_device *netdev)
1431{
1432	struct e1000_adapter *adapter = netdev_priv(netdev);
1433	struct e1000_hw *hw = &adapter->hw;
1434	int count = E1000_CHECK_RESET_COUNT;
1435
1436	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1437		usleep_range(10000, 20000);
1438
1439	WARN_ON(count < 0);
1440
1441	/* signal that we're down so that the reset task will no longer run */
1442	set_bit(__E1000_DOWN, &adapter->flags);
1443	clear_bit(__E1000_RESETTING, &adapter->flags);
1444
1445	e1000_down(adapter);
1446	e1000_power_down_phy(adapter);
1447	e1000_free_irq(adapter);
1448
1449	e1000_free_all_tx_resources(adapter);
1450	e1000_free_all_rx_resources(adapter);
1451
1452	/* kill manageability vlan ID if supported, but not if a vlan with
1453	 * the same ID is registered on the host OS (let 8021q kill it)
1454	 */
1455	if ((hw->mng_cookie.status &
1456	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1457	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1458		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1459				       adapter->mng_vlan_id);
1460	}
1461
1462	return 0;
1463}
1464
1465/**
1466 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1467 * @adapter: address of board private structure
1468 * @start: address of beginning of memory
1469 * @len: length of memory
1470 **/
1471static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1472				  unsigned long len)
1473{
1474	struct e1000_hw *hw = &adapter->hw;
1475	unsigned long begin = (unsigned long)start;
1476	unsigned long end = begin + len;
1477
1478	/* First rev 82545 and 82546 need to not allow any memory
1479	 * write location to cross 64k boundary due to errata 23
1480	 */
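	/* begin and (end - 1) land in the same 64 KiB page exactly when
	 * their addresses match in all bits above 15, i.e. when XOR-ing
	 * them leaves nothing at bit 16 or higher.  For example (made-up
	 * numbers), begin = 0x1f000 with len = 0x2000 gives end - 1 =
	 * 0x20fff; the XOR shifted right by 16 is nonzero, so the buffer
	 * straddles a boundary and is rejected.
	 */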
1481	if (hw->mac_type == e1000_82545 ||
1482	    hw->mac_type == e1000_ce4100 ||
1483	    hw->mac_type == e1000_82546) {
1484		return ((begin ^ (end - 1)) >> 16) == 0;
1485	}
1486
1487	return true;
1488}
1489
1490/**
1491 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1492 * @adapter: board private structure
1493 * @txdr:    tx descriptor ring (for a specific queue) to setup
1494 *
1495 * Return 0 on success, negative on failure
1496 **/
1497static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1498				    struct e1000_tx_ring *txdr)
1499{
1500	struct pci_dev *pdev = adapter->pdev;
1501	int size;
1502
1503	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1504	txdr->buffer_info = vzalloc(size);
1505	if (!txdr->buffer_info)
1506		return -ENOMEM;
1507
1508	/* round up to nearest 4K */
1509
1510	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1511	txdr->size = ALIGN(txdr->size, 4096);
1512
1513	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1514					GFP_KERNEL);
1515	if (!txdr->desc) {
1516setup_tx_desc_die:
1517		vfree(txdr->buffer_info);
1518		return -ENOMEM;
1519	}
1520
1521	/* Fix for errata 23, can't cross 64kB boundary */
1522	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1523		void *olddesc = txdr->desc;
1524		dma_addr_t olddma = txdr->dma;
1525		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1526		      txdr->size, txdr->desc);
1527		/* Try again, without freeing the previous */
1528		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1529						&txdr->dma, GFP_KERNEL);
1530		/* Failed allocation, critical failure */
1531		if (!txdr->desc) {
1532			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1533					  olddma);
1534			goto setup_tx_desc_die;
1535		}
1536
1537		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1538			/* give up */
1539			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1540					  txdr->dma);
1541			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1542					  olddma);
1543			e_err(probe, "Unable to allocate aligned memory "
1544			      "for the transmit descriptor ring\n");
1545			vfree(txdr->buffer_info);
1546			return -ENOMEM;
1547		} else {
1548			/* Free old allocation, new allocation was successful */
1549			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1550					  olddma);
1551		}
1552	}
1553	memset(txdr->desc, 0, txdr->size);
1554
1555	txdr->next_to_use = 0;
1556	txdr->next_to_clean = 0;
1557
1558	return 0;
1559}
1560
1561/**
1562 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1563 * 				  (Descriptors) for all queues
1564 * @adapter: board private structure
1565 *
1566 * Return 0 on success, negative on failure
1567 **/
1568int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1569{
1570	int i, err = 0;
1571
1572	for (i = 0; i < adapter->num_tx_queues; i++) {
1573		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1574		if (err) {
1575			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1576			for (i-- ; i >= 0; i--)
1577				e1000_free_tx_resources(adapter,
1578							&adapter->tx_ring[i]);
1579			break;
1580		}
1581	}
1582
1583	return err;
1584}
1585
1586/**
1587 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1588 * @adapter: board private structure
1589 *
1590 * Configure the Tx unit of the MAC after a reset.
1591 **/
1592static void e1000_configure_tx(struct e1000_adapter *adapter)
1593{
1594	u64 tdba;
1595	struct e1000_hw *hw = &adapter->hw;
1596	u32 tdlen, tctl, tipg;
1597	u32 ipgr1, ipgr2;
1598
1599	/* Setup the HW Tx Head and Tail descriptor pointers */
1600
1601	switch (adapter->num_tx_queues) {
1602	case 1:
1603	default:
1604		tdba = adapter->tx_ring[0].dma;
1605		tdlen = adapter->tx_ring[0].count *
1606			sizeof(struct e1000_tx_desc);
1607		ew32(TDLEN, tdlen);
1608		ew32(TDBAH, (tdba >> 32));
1609		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1610		ew32(TDT, 0);
1611		ew32(TDH, 0);
1612		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1613					   E1000_TDH : E1000_82542_TDH);
1614		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1615					   E1000_TDT : E1000_82542_TDT);
1616		break;
1617	}
1618
1619	/* Set the default values for the Tx Inter Packet Gap timer */
1620	if ((hw->media_type == e1000_media_type_fiber ||
1621	     hw->media_type == e1000_media_type_internal_serdes))
1622		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1623	else
1624		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1625
1626	switch (hw->mac_type) {
1627	case e1000_82542_rev2_0:
1628	case e1000_82542_rev2_1:
1629		tipg = DEFAULT_82542_TIPG_IPGT;
1630		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1631		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1632		break;
1633	default:
1634		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1635		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1636		break;
1637	}
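	/* TIPG packs all three gap fields into one register: IPGT occupies
	 * the low bits, IPGR1 starts at bit 10, and IPGR2 at bit 20, hence
	 * the two shifts below.
	 */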
1638	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1639	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1640	ew32(TIPG, tipg);
1641
1642	/* Set the Tx Interrupt Delay register */
1643
1644	ew32(TIDV, adapter->tx_int_delay);
1645	if (hw->mac_type >= e1000_82540)
1646		ew32(TADV, adapter->tx_abs_int_delay);
1647
1648	/* Program the Transmit Control Register */
1649
1650	tctl = er32(TCTL);
1651	tctl &= ~E1000_TCTL_CT;
1652	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1653		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1654
1655	e1000_config_collision_dist(hw);
1656
1657	/* Setup Transmit Descriptor Settings for eop descriptor */
1658	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1659
1660	/* only set IDE if we are delaying interrupts using the timers */
1661	if (adapter->tx_int_delay)
1662		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1663
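	/* The oldest MACs request completion write-back with the Report
	 * Packet Sent (RPS) bit; 82543 and newer use Report Status (RS).
	 */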
1664	if (hw->mac_type < e1000_82543)
1665		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1666	else
1667		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1668
1669	/* Cache if we're 82544 running in PCI-X because we'll
1670	 * need this to apply a workaround later in the send path.
1671	 */
1672	if (hw->mac_type == e1000_82544 &&
1673	    hw->bus_type == e1000_bus_type_pcix)
1674		adapter->pcix_82544 = true;
1675
1676	ew32(TCTL, tctl);
1677
1678}
1679
1680/**
1681 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1682 * @adapter: board private structure
1683 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1684 *
1685 * Returns 0 on success, negative on failure
1686 **/
1687static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1688				    struct e1000_rx_ring *rxdr)
1689{
1690	struct pci_dev *pdev = adapter->pdev;
1691	int size, desc_len;
1692
1693	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1694	rxdr->buffer_info = vzalloc(size);
1695	if (!rxdr->buffer_info)
1696		return -ENOMEM;
1697
1698	desc_len = sizeof(struct e1000_rx_desc);
1699
1700	/* Round up to nearest 4K */
1701
1702	rxdr->size = rxdr->count * desc_len;
1703	rxdr->size = ALIGN(rxdr->size, 4096);
1704
1705	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1706					GFP_KERNEL);
1707	if (!rxdr->desc) {
1708setup_rx_desc_die:
1709		vfree(rxdr->buffer_info);
1710		return -ENOMEM;
1711	}
1712
1713	/* Fix for errata 23, can't cross 64kB boundary */
1714	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1715		void *olddesc = rxdr->desc;
1716		dma_addr_t olddma = rxdr->dma;
1717		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1718		      rxdr->size, rxdr->desc);
1719		/* Try again, without freeing the previous */
1720		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1721						&rxdr->dma, GFP_KERNEL);
1722		/* Failed allocation, critical failure */
1723		if (!rxdr->desc) {
1724			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1725					  olddma);
1726			goto setup_rx_desc_die;
1727		}
1728
1729		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1730			/* give up */
1731			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1732					  rxdr->dma);
1733			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1734					  olddma);
1735			e_err(probe, "Unable to allocate aligned memory for "
1736			      "the Rx descriptor ring\n");
1737			goto setup_rx_desc_die;
1738		} else {
1739			/* Free old allocation, new allocation was successful */
1740			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1741					  olddma);
1742		}
1743	}
1744	memset(rxdr->desc, 0, rxdr->size);
1745
1746	rxdr->next_to_clean = 0;
1747	rxdr->next_to_use = 0;
1748	rxdr->rx_skb_top = NULL;
1749
1750	return 0;
1751}
1752
1753/**
1754 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1755 * 				  (Descriptors) for all queues
1756 * @adapter: board private structure
1757 *
1758 * Return 0 on success, negative on failure
1759 **/
1760int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1761{
1762	int i, err = 0;
1763
1764	for (i = 0; i < adapter->num_rx_queues; i++) {
1765		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1766		if (err) {
1767			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1768			for (i--; i >= 0; i--)
1769				e1000_free_rx_resources(adapter,
1770							&adapter->rx_ring[i]);
1771			break;
1772		}
1773	}
1774
1775	return err;
1776}
1777
1778/**
1779 * e1000_setup_rctl - configure the receive control registers
1780 * @adapter: Board private structure
1781 **/
1782static void e1000_setup_rctl(struct e1000_adapter *adapter)
1783{
1784	struct e1000_hw *hw = &adapter->hw;
1785	u32 rctl;
1786
1787	rctl = er32(RCTL);
1788
1789	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1790
1791	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1792		E1000_RCTL_RDMTS_HALF |
1793		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1794
1795	if (hw->tbi_compatibility_on == 1)
1796		rctl |= E1000_RCTL_SBP;
1797	else
1798		rctl &= ~E1000_RCTL_SBP;
1799
1800	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1801		rctl &= ~E1000_RCTL_LPE;
1802	else
1803		rctl |= E1000_RCTL_LPE;
1804
1805	/* Setup buffer sizes */
1806	rctl &= ~E1000_RCTL_SZ_4096;
1807	rctl |= E1000_RCTL_BSEX;
1808	switch (adapter->rx_buffer_len) {
1809	case E1000_RXBUFFER_2048:
1810	default:
1811		rctl |= E1000_RCTL_SZ_2048;
1812		rctl &= ~E1000_RCTL_BSEX;
1813		break;
1814	case E1000_RXBUFFER_4096:
1815		rctl |= E1000_RCTL_SZ_4096;
1816		break;
1817	case E1000_RXBUFFER_8192:
1818		rctl |= E1000_RCTL_SZ_8192;
1819		break;
1820	case E1000_RXBUFFER_16384:
1821		rctl |= E1000_RCTL_SZ_16384;
1822		break;
1823	}
1824
1825	/* This is useful for sniffing bad packets. */
1826	if (adapter->netdev->features & NETIF_F_RXALL) {
1827		/* UPE and MPE will be handled by normal PROMISC logic
1828		 * in e1000_set_rx_mode
1829		 */
1830		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1831			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1832			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1833
1834		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1835			  E1000_RCTL_DPF | /* Allow filtered pause */
1836			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1837		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1838		 * and that breaks VLANs.
1839		 */
1840	}
1841
1842	ew32(RCTL, rctl);
1843}
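/* Note on the switch above: E1000_RCTL_BSEX is set up front and cleared
 * only in the default 2048-byte case, so the three larger buffer sizes
 * are expressed through the extended (BSEX) encoding of the same RCTL
 * size bits, while 2048 uses the legacy encoding.
 */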
1844
1845/**
1846 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1847 * @adapter: board private structure
1848 *
1849 * Configure the Rx unit of the MAC after a reset.
1850 **/
1851static void e1000_configure_rx(struct e1000_adapter *adapter)
1852{
1853	u64 rdba;
1854	struct e1000_hw *hw = &adapter->hw;
1855	u32 rdlen, rctl, rxcsum;
1856
1857	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1858		rdlen = adapter->rx_ring[0].count *
1859			sizeof(struct e1000_rx_desc);
1860		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1861		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1862	} else {
1863		rdlen = adapter->rx_ring[0].count *
1864			sizeof(struct e1000_rx_desc);
1865		adapter->clean_rx = e1000_clean_rx_irq;
1866		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1867	}
1868
1869	/* disable receives while setting up the descriptors */
1870	rctl = er32(RCTL);
1871	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1872
1873	/* set the Receive Delay Timer Register */
1874	ew32(RDTR, adapter->rx_int_delay);
1875
1876	if (hw->mac_type >= e1000_82540) {
1877		ew32(RADV, adapter->rx_abs_int_delay);
1878		if (adapter->itr_setting != 0)
1879			ew32(ITR, 1000000000 / (adapter->itr * 256));
1880	}
1881
1882	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1883	 * the Base and Length of the Rx Descriptor Ring
1884	 */
1885	switch (adapter->num_rx_queues) {
1886	case 1:
1887	default:
1888		rdba = adapter->rx_ring[0].dma;
1889		ew32(RDLEN, rdlen);
1890		ew32(RDBAH, (rdba >> 32));
1891		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1892		ew32(RDT, 0);
1893		ew32(RDH, 0);
1894		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1895					   E1000_RDH : E1000_82542_RDH);
1896		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1897					   E1000_RDT : E1000_82542_RDT);
1898		break;
1899	}
1900
1901	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1902	if (hw->mac_type >= e1000_82543) {
1903		rxcsum = er32(RXCSUM);
1904		if (adapter->rx_csum)
1905			rxcsum |= E1000_RXCSUM_TUOFL;
1906		else
1907			/* don't need to clear IPPCSE as it defaults to 0 */
1908			rxcsum &= ~E1000_RXCSUM_TUOFL;
1909		ew32(RXCSUM, rxcsum);
1910	}
1911
1912	/* Enable Receives */
1913	ew32(RCTL, rctl | E1000_RCTL_EN);
1914}
1915
1916/**
1917 * e1000_free_tx_resources - Free Tx Resources per Queue
1918 * @adapter: board private structure
1919 * @tx_ring: Tx descriptor ring for a specific queue
1920 *
1921 * Free all transmit software resources
1922 **/
1923static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1924				    struct e1000_tx_ring *tx_ring)
1925{
1926	struct pci_dev *pdev = adapter->pdev;
1927
1928	e1000_clean_tx_ring(adapter, tx_ring);
1929
1930	vfree(tx_ring->buffer_info);
1931	tx_ring->buffer_info = NULL;
1932
1933	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1934			  tx_ring->dma);
1935
1936	tx_ring->desc = NULL;
1937}
1938
1939/**
1940 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1941 * @adapter: board private structure
1942 *
1943 * Free all transmit software resources
1944 **/
1945void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1946{
1947	int i;
1948
1949	for (i = 0; i < adapter->num_tx_queues; i++)
1950		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1951}
1952
1953static void
1954e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1955				 struct e1000_tx_buffer *buffer_info)
1956{
1957	if (buffer_info->dma) {
1958		if (buffer_info->mapped_as_page)
1959			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1960				       buffer_info->length, DMA_TO_DEVICE);
1961		else
1962			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1963					 buffer_info->length,
1964					 DMA_TO_DEVICE);
1965		buffer_info->dma = 0;
1966	}
1967	if (buffer_info->skb) {
1968		dev_kfree_skb_any(buffer_info->skb);
1969		buffer_info->skb = NULL;
1970	}
1971	buffer_info->time_stamp = 0;
1972	/* buffer_info must be completely set up in the transmit path */
1973}
1974
1975/**
1976 * e1000_clean_tx_ring - Free Tx Buffers
1977 * @adapter: board private structure
1978 * @tx_ring: ring to be cleaned
1979 **/
1980static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1981				struct e1000_tx_ring *tx_ring)
1982{
1983	struct e1000_hw *hw = &adapter->hw;
1984	struct e1000_tx_buffer *buffer_info;
1985	unsigned long size;
1986	unsigned int i;
1987
1988	/* Free all the Tx ring sk_buffs */
1989
1990	for (i = 0; i < tx_ring->count; i++) {
1991		buffer_info = &tx_ring->buffer_info[i];
1992		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1993	}
1994
1995	netdev_reset_queue(adapter->netdev);
1996	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1997	memset(tx_ring->buffer_info, 0, size);
1998
1999	/* Zero out the descriptor ring */
2000
2001	memset(tx_ring->desc, 0, tx_ring->size);
2002
2003	tx_ring->next_to_use = 0;
2004	tx_ring->next_to_clean = 0;
2005	tx_ring->last_tx_tso = false;
2006
2007	writel(0, hw->hw_addr + tx_ring->tdh);
2008	writel(0, hw->hw_addr + tx_ring->tdt);
2009}
2010
2011/**
2012 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2013 * @adapter: board private structure
2014 **/
2015static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2016{
2017	int i;
2018
2019	for (i = 0; i < adapter->num_tx_queues; i++)
2020		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2021}
2022
2023/**
2024 * e1000_free_rx_resources - Free Rx Resources
2025 * @adapter: board private structure
2026 * @rx_ring: ring to clean the resources from
2027 *
2028 * Free all receive software resources
2029 **/
2030static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2031				    struct e1000_rx_ring *rx_ring)
2032{
2033	struct pci_dev *pdev = adapter->pdev;
2034
2035	e1000_clean_rx_ring(adapter, rx_ring);
2036
2037	vfree(rx_ring->buffer_info);
2038	rx_ring->buffer_info = NULL;
2039
2040	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2041			  rx_ring->dma);
2042
2043	rx_ring->desc = NULL;
2044}
2045
2046/**
2047 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2048 * @adapter: board private structure
2049 *
2050 * Free all receive software resources
2051 **/
2052void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2053{
2054	int i;
2055
2056	for (i = 0; i < adapter->num_rx_queues; i++)
2057		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2058}
2059
2060#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2061static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2062{
2063	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2064		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2065}
2066
2067static void *e1000_alloc_frag(const struct e1000_adapter *a)
2068{
2069	unsigned int len = e1000_frag_len(a);
2070	u8 *data = netdev_alloc_frag(len);
2071
2072	if (likely(data))
2073		data += E1000_HEADROOM;
2074	return data;
2075}
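/* Worked example of the sizing above, with values that are arch and
 * config dependent and only assumed here (NET_SKB_PAD = 64,
 * NET_IP_ALIGN = 2, 64-byte SKB_DATA_ALIGN, 320-byte skb_shared_info):
 * for rx_buffer_len = 2048, e1000_frag_len() returns
 * SKB_DATA_ALIGN(2048 + 66) + SKB_DATA_ALIGN(320) = 2176 + 320 = 2496
 * bytes, keeping each receive buffer well under a page so the
 * page-fragment allocator can pack more than one into a page.
 */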
2076
2077/**
2078 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2079 * @adapter: board private structure
2080 * @rx_ring: ring to free buffers from
2081 **/
2082static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2083				struct e1000_rx_ring *rx_ring)
2084{
2085	struct e1000_hw *hw = &adapter->hw;
2086	struct e1000_rx_buffer *buffer_info;
2087	struct pci_dev *pdev = adapter->pdev;
2088	unsigned long size;
2089	unsigned int i;
2090
2091	/* Free all the Rx netfrags */
2092	for (i = 0; i < rx_ring->count; i++) {
2093		buffer_info = &rx_ring->buffer_info[i];
2094		if (adapter->clean_rx == e1000_clean_rx_irq) {
2095			if (buffer_info->dma)
2096				dma_unmap_single(&pdev->dev, buffer_info->dma,
2097						 adapter->rx_buffer_len,
2098						 DMA_FROM_DEVICE);
2099			if (buffer_info->rxbuf.data) {
2100				skb_free_frag(buffer_info->rxbuf.data);
2101				buffer_info->rxbuf.data = NULL;
2102			}
2103		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2104			if (buffer_info->dma)
2105				dma_unmap_page(&pdev->dev, buffer_info->dma,
2106					       adapter->rx_buffer_len,
2107					       DMA_FROM_DEVICE);
2108			if (buffer_info->rxbuf.page) {
2109				put_page(buffer_info->rxbuf.page);
2110				buffer_info->rxbuf.page = NULL;
2111			}
2112		}
2113
2114		buffer_info->dma = 0;
2115	}
2116
2117	/* there may also be some cached data from a chained receive */
2118	napi_free_frags(&adapter->napi);
2119	rx_ring->rx_skb_top = NULL;
2120
2121	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2122	memset(rx_ring->buffer_info, 0, size);
2123
2124	/* Zero out the descriptor ring */
2125	memset(rx_ring->desc, 0, rx_ring->size);
2126
2127	rx_ring->next_to_clean = 0;
2128	rx_ring->next_to_use = 0;
2129
2130	writel(0, hw->hw_addr + rx_ring->rdh);
2131	writel(0, hw->hw_addr + rx_ring->rdt);
2132}
2133
2134/**
2135 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2136 * @adapter: board private structure
2137 **/
2138static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2139{
2140	int i;
2141
2142	for (i = 0; i < adapter->num_rx_queues; i++)
2143		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2144}
2145
2146/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2147 * and memory write and invalidate disabled for certain operations
2148 */
2149static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2150{
2151	struct e1000_hw *hw = &adapter->hw;
2152	struct net_device *netdev = adapter->netdev;
2153	u32 rctl;
2154
2155	e1000_pci_clear_mwi(hw);
2156
2157	rctl = er32(RCTL);
2158	rctl |= E1000_RCTL_RST;
2159	ew32(RCTL, rctl);
2160	E1000_WRITE_FLUSH();
2161	mdelay(5);
2162
2163	if (netif_running(netdev))
2164		e1000_clean_all_rx_rings(adapter);
2165}
2166
2167static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2168{
2169	struct e1000_hw *hw = &adapter->hw;
2170	struct net_device *netdev = adapter->netdev;
2171	u32 rctl;
2172
2173	rctl = er32(RCTL);
2174	rctl &= ~E1000_RCTL_RST;
2175	ew32(RCTL, rctl);
2176	E1000_WRITE_FLUSH();
2177	mdelay(5);
2178
2179	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2180		e1000_pci_set_mwi(hw);
2181
2182	if (netif_running(netdev)) {
2183		/* No need to loop, because 82542 supports only 1 queue */
2184		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2185		e1000_configure_rx(adapter);
2186		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2187	}
2188}
2189
2190/**
2191 * e1000_set_mac - Change the Ethernet Address of the NIC
2192 * @netdev: network interface device structure
2193 * @p: pointer to an address structure
2194 *
2195 * Returns 0 on success, negative on failure
2196 **/
2197static int e1000_set_mac(struct net_device *netdev, void *p)
2198{
2199	struct e1000_adapter *adapter = netdev_priv(netdev);
2200	struct e1000_hw *hw = &adapter->hw;
2201	struct sockaddr *addr = p;
2202
2203	if (!is_valid_ether_addr(addr->sa_data))
2204		return -EADDRNOTAVAIL;
2205
2206	/* 82542 2.0 needs to be in reset to write receive address registers */
2207
2208	if (hw->mac_type == e1000_82542_rev2_0)
2209		e1000_enter_82542_rst(adapter);
2210
2211	eth_hw_addr_set(netdev, addr->sa_data);
2212	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2213
2214	e1000_rar_set(hw, hw->mac_addr, 0);
2215
2216	if (hw->mac_type == e1000_82542_rev2_0)
2217		e1000_leave_82542_rst(adapter);
2218
2219	return 0;
2220}
2221
2222/**
2223 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2224 * @netdev: network interface device structure
2225 *
2226 * The set_rx_mode entry point is called whenever the unicast or multicast
2227 * address lists or the network interface flags are updated. This routine is
2228 * responsible for configuring the hardware for proper unicast, multicast,
2229 * promiscuous mode, and all-multi behavior.
2230 **/
2231static void e1000_set_rx_mode(struct net_device *netdev)
2232{
2233	struct e1000_adapter *adapter = netdev_priv(netdev);
2234	struct e1000_hw *hw = &adapter->hw;
2235	struct netdev_hw_addr *ha;
2236	bool use_uc = false;
2237	u32 rctl;
2238	u32 hash_value;
2239	int i, rar_entries = E1000_RAR_ENTRIES;
2240	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2241	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2242
2243	if (!mcarray)
2244		return;
2245
2246	/* Check for Promiscuous and All Multicast modes */
2247
2248	rctl = er32(RCTL);
2249
2250	if (netdev->flags & IFF_PROMISC) {
2251		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2252		rctl &= ~E1000_RCTL_VFE;
2253	} else {
2254		if (netdev->flags & IFF_ALLMULTI)
2255			rctl |= E1000_RCTL_MPE;
2256		else
2257			rctl &= ~E1000_RCTL_MPE;
2258		/* Enable VLAN filter if there is a VLAN */
2259		if (e1000_vlan_used(adapter))
2260			rctl |= E1000_RCTL_VFE;
2261	}
2262
2263	if (netdev_uc_count(netdev) > rar_entries - 1) {
2264		rctl |= E1000_RCTL_UPE;
2265	} else if (!(netdev->flags & IFF_PROMISC)) {
2266		rctl &= ~E1000_RCTL_UPE;
2267		use_uc = true;
2268	}
2269
2270	ew32(RCTL, rctl);
2271
2272	/* 82542 2.0 needs to be in reset to write receive address registers */
2273
2274	if (hw->mac_type == e1000_82542_rev2_0)
2275		e1000_enter_82542_rst(adapter);
2276
2277	/* load the first 14 addresses into the exact filters 1-14. Unicast
2278	 * addresses take precedence to avoid disabling unicast filtering
2279	 * when possible.
2280	 *
2281	 * RAR 0 is used for the station MAC address
2282	 * if there are not 14 addresses, go ahead and clear the filters
2283	 */
2284	i = 1;
2285	if (use_uc)
2286		netdev_for_each_uc_addr(ha, netdev) {
2287			if (i == rar_entries)
2288				break;
2289			e1000_rar_set(hw, ha->addr, i++);
2290		}
2291
2292	netdev_for_each_mc_addr(ha, netdev) {
2293		if (i == rar_entries) {
2294			/* load any remaining addresses into the hash table */
2295			u32 hash_reg, hash_bit, mta;
2296			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2297			hash_reg = (hash_value >> 5) & 0x7F;
2298			hash_bit = hash_value & 0x1F;
2299			mta = (1 << hash_bit);
2300			mcarray[hash_reg] |= mta;
2301		} else {
2302			e1000_rar_set(hw, ha->addr, i++);
2303		}
2304	}
2305
2306	for (; i < rar_entries; i++) {
2307		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2308		E1000_WRITE_FLUSH();
2309		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2310		E1000_WRITE_FLUSH();
2311	}
2312
2313	/* write the hash table completely, write from bottom to avoid
2314	 * both stupid write combining chipsets, and flushing each write
2315	 */
2316	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2317		/* The 82544 has an erratum where writing odd
2318		 * offsets overwrites the previous even offset, but writing
2319		 * backwards over the range solves the issue by always
2320		 * writing the odd offset first
2321		 */
2322		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2323	}
2324	E1000_WRITE_FLUSH();
2325
2326	if (hw->mac_type == e1000_82542_rev2_0)
2327		e1000_leave_82542_rst(adapter);
2328
2329	kfree(mcarray);
2330}
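/* The hash-table indexing above splits the multicast hash into a register
 * index and a bit index: e.g. hash_value 0x0A36 sets bit 22 of MTA
 * register 81.  A minimal sketch of that split (helper name is
 * illustrative only):
 */
static inline void e1000_mta_index(u32 hash_value, u32 *reg, u32 *bit)
{
	*reg = (hash_value >> 5) & 0x7F;	/* upper 7 bits: one of 128 registers */
	*bit = hash_value & 0x1F;		/* lower 5 bits: one of 32 bits */
}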
2331
2332/**
2333 * e1000_update_phy_info_task - get phy info
2334 * @work: work struct contained inside adapter struct
2335 *
2336 * Need to wait a few seconds after link up to get diagnostic information from
2337 * the phy
2338 */
2339static void e1000_update_phy_info_task(struct work_struct *work)
2340{
2341	struct e1000_adapter *adapter = container_of(work,
2342						     struct e1000_adapter,
2343						     phy_info_task.work);
2344
2345	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2346}
2347
2348/**
2349 * e1000_82547_tx_fifo_stall_task - task to complete work
2350 * @work: work struct contained inside adapter struct
2351 **/
2352static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2353{
2354	struct e1000_adapter *adapter = container_of(work,
2355						     struct e1000_adapter,
2356						     fifo_stall_task.work);
2357	struct e1000_hw *hw = &adapter->hw;
2358	struct net_device *netdev = adapter->netdev;
2359	u32 tctl;
2360
2361	if (atomic_read(&adapter->tx_fifo_stall)) {
2362		if ((er32(TDT) == er32(TDH)) &&
2363		   (er32(TDFT) == er32(TDFH)) &&
2364		   (er32(TDFTS) == er32(TDFHS))) {
2365			tctl = er32(TCTL);
2366			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2367			ew32(TDFT, adapter->tx_head_addr);
2368			ew32(TDFH, adapter->tx_head_addr);
2369			ew32(TDFTS, adapter->tx_head_addr);
2370			ew32(TDFHS, adapter->tx_head_addr);
2371			ew32(TCTL, tctl);
2372			E1000_WRITE_FLUSH();
2373
2374			adapter->tx_fifo_head = 0;
2375			atomic_set(&adapter->tx_fifo_stall, 0);
2376			netif_wake_queue(netdev);
2377		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2378			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2379		}
2380	}
2381}
2382
2383bool e1000_has_link(struct e1000_adapter *adapter)
2384{
2385	struct e1000_hw *hw = &adapter->hw;
2386	bool link_active = false;
2387
2388	/* get_link_status is set on LSC (link status) interrupt or rx
2389	 * sequence error interrupt (except on intel ce4100).
2390	 * get_link_status will stay false until the
2391	 * e1000_check_for_link establishes link for copper adapters
2392	 * ONLY
2393	 */
2394	switch (hw->media_type) {
2395	case e1000_media_type_copper:
2396		if (hw->mac_type == e1000_ce4100)
2397			hw->get_link_status = 1;
2398		if (hw->get_link_status) {
2399			e1000_check_for_link(hw);
2400			link_active = !hw->get_link_status;
2401		} else {
2402			link_active = true;
2403		}
2404		break;
2405	case e1000_media_type_fiber:
2406		e1000_check_for_link(hw);
2407		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2408		break;
2409	case e1000_media_type_internal_serdes:
2410		e1000_check_for_link(hw);
2411		link_active = hw->serdes_has_link;
2412		break;
2413	default:
2414		break;
2415	}
2416
2417	return link_active;
2418}
2419
2420/**
2421 * e1000_watchdog - work function
2422 * @work: work struct contained inside adapter struct
2423 **/
2424static void e1000_watchdog(struct work_struct *work)
2425{
2426	struct e1000_adapter *adapter = container_of(work,
2427						     struct e1000_adapter,
2428						     watchdog_task.work);
2429	struct e1000_hw *hw = &adapter->hw;
2430	struct net_device *netdev = adapter->netdev;
2431	struct e1000_tx_ring *txdr = adapter->tx_ring;
2432	u32 link, tctl;
2433
2434	link = e1000_has_link(adapter);
2435	if ((netif_carrier_ok(netdev)) && link)
2436		goto link_up;
2437
2438	if (link) {
2439		if (!netif_carrier_ok(netdev)) {
2440			u32 ctrl;
2441			/* update snapshot of PHY registers on LSC */
2442			e1000_get_speed_and_duplex(hw,
2443						   &adapter->link_speed,
2444						   &adapter->link_duplex);
2445
2446			ctrl = er32(CTRL);
2447			pr_info("%s NIC Link is Up %d Mbps %s, "
2448				"Flow Control: %s\n",
2449				netdev->name,
2450				adapter->link_speed,
2451				adapter->link_duplex == FULL_DUPLEX ?
2452				"Full Duplex" : "Half Duplex",
2453				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2454				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2455				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2456				E1000_CTRL_TFCE) ? "TX" : "None")));
2457
2458			/* adjust timeout factor according to speed/duplex */
2459			adapter->tx_timeout_factor = 1;
2460			switch (adapter->link_speed) {
2461			case SPEED_10:
2462				adapter->tx_timeout_factor = 16;
2463				break;
2464			case SPEED_100:
2465				/* maybe add some timeout factor ? */
2466				break;
2467			}
2468
2469			/* enable transmits in the hardware */
2470			tctl = er32(TCTL);
2471			tctl |= E1000_TCTL_EN;
2472			ew32(TCTL, tctl);
2473
2474			netif_carrier_on(netdev);
2475			if (!test_bit(__E1000_DOWN, &adapter->flags))
2476				schedule_delayed_work(&adapter->phy_info_task,
2477						      2 * HZ);
2478			adapter->smartspeed = 0;
2479		}
2480	} else {
2481		if (netif_carrier_ok(netdev)) {
2482			adapter->link_speed = 0;
2483			adapter->link_duplex = 0;
2484			pr_info("%s NIC Link is Down\n",
2485				netdev->name);
2486			netif_carrier_off(netdev);
2487
2488			if (!test_bit(__E1000_DOWN, &adapter->flags))
2489				schedule_delayed_work(&adapter->phy_info_task,
2490						      2 * HZ);
2491		}
2492
2493		e1000_smartspeed(adapter);
2494	}
2495
2496link_up:
2497	e1000_update_stats(adapter);
2498
2499	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2500	adapter->tpt_old = adapter->stats.tpt;
2501	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2502	adapter->colc_old = adapter->stats.colc;
2503
2504	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2505	adapter->gorcl_old = adapter->stats.gorcl;
2506	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2507	adapter->gotcl_old = adapter->stats.gotcl;
2508
2509	e1000_update_adaptive(hw);
2510
2511	if (!netif_carrier_ok(netdev)) {
2512		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2513			/* We've lost link, so the controller stops DMA,
2514			 * but we've got queued Tx work that's never going
2515			 * to get done, so reset controller to flush Tx.
2516			 * (Do the reset outside of interrupt context).
2517			 */
2518			adapter->tx_timeout_count++;
2519			schedule_work(&adapter->reset_task);
2520			/* exit immediately since reset is imminent */
2521			return;
2522		}
2523	}
2524
2525	/* Simple mode for Interrupt Throttle Rate (ITR) */
2526	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2527		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2528		 * Total asymmetrical Tx or Rx gets ITR=8000;
2529		 * everyone else is between 2000-8000.
2530		 */
2531		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2532		u32 dif = (adapter->gotcl > adapter->gorcl ?
2533			    adapter->gotcl - adapter->gorcl :
2534			    adapter->gorcl - adapter->gotcl) / 10000;
2535		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2536
2537		ew32(ITR, 1000000000 / (itr * 256));
2538	}
2539
2540	/* Cause software interrupt to ensure rx ring is cleaned */
2541	ew32(ICS, E1000_ICS_RXDMT0);
2542
2543	/* Force detection of hung controller every watchdog period */
2544	adapter->detect_tx_hung = true;
2545
2546	/* Reschedule the task */
2547	if (!test_bit(__E1000_DOWN, &adapter->flags))
2548		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2549}
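/* Worked example of the simple ITR mode above: gotcl = 300000 and
 * gorcl = 100000 bytes in the last interval give goc = 40 and dif = 20
 * (in 10 KB units), so itr = 20 * 6000 / 40 + 2000 = 5000 ints/s.
 * Symmetric traffic (dif = 0) hits the 2000 floor; fully one-sided
 * traffic (dif = goc) hits the 8000 ceiling.
 */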
2550
2551enum latency_range {
2552	lowest_latency = 0,
2553	low_latency = 1,
2554	bulk_latency = 2,
2555	latency_invalid = 255
2556};
2557
2558/**
2559 * e1000_update_itr - update the dynamic ITR value based on statistics
2560 * @adapter: pointer to adapter
2561 * @itr_setting: current adapter->itr
2562 * @packets: the number of packets during this measurement interval
2563 * @bytes: the number of bytes during this measurement interval
2564 *
2565 *      Stores a new ITR value based on packets and byte
2566 *      counts during the last interrupt.  The advantage of per interrupt
2567 *      computation is faster updates and more accurate ITR for the current
2568 *      traffic pattern.  Constants in this function were computed
2569 *      based on theoretical maximum wire speed and thresholds were set based
2570 *      on testing data as well as attempting to minimize response time
2571 *      while increasing bulk throughput.
2572 *      This functionality is controlled by the InterruptThrottleRate module
2573 *      parameter (see e1000_param.c).
2574 **/
2575static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2576				     u16 itr_setting, int packets, int bytes)
2577{
2578	unsigned int retval = itr_setting;
2579	struct e1000_hw *hw = &adapter->hw;
2580
2581	if (unlikely(hw->mac_type < e1000_82540))
2582		goto update_itr_done;
2583
2584	if (packets == 0)
2585		goto update_itr_done;
2586
2587	switch (itr_setting) {
2588	case lowest_latency:
2589		/* jumbo frames get bulk treatment */
2590		if (bytes/packets > 8000)
2591			retval = bulk_latency;
2592		else if ((packets < 5) && (bytes > 512))
2593			retval = low_latency;
2594		break;
2595	case low_latency:  /* 50 usec aka 20000 ints/s */
2596		if (bytes > 10000) {
2597			/* jumbo frames need bulk latency setting */
2598			if (bytes/packets > 8000)
2599				retval = bulk_latency;
2600			else if ((packets < 10) || ((bytes/packets) > 1200))
2601				retval = bulk_latency;
2602			else if ((packets > 35))
2603				retval = lowest_latency;
2604		} else if (bytes/packets > 2000)
2605			retval = bulk_latency;
2606		else if (packets <= 2 && bytes < 512)
2607			retval = lowest_latency;
2608		break;
2609	case bulk_latency: /* 250 usec aka 4000 ints/s */
2610		if (bytes > 25000) {
2611			if (packets > 35)
2612				retval = low_latency;
2613		} else if (bytes < 6000) {
2614			retval = low_latency;
2615		}
2616		break;
2617	}
2618
2619update_itr_done:
2620	return retval;
2621}
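/* Example of the classification above: 40 packets totalling 30000 bytes
 * while in bulk_latency (bytes > 25000, packets > 35) steps back to
 * low_latency, whereas 2 packets totalling 400 bytes while in
 * low_latency drops to lowest_latency.
 */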
2622
2623static void e1000_set_itr(struct e1000_adapter *adapter)
2624{
2625	struct e1000_hw *hw = &adapter->hw;
2626	u16 current_itr;
2627	u32 new_itr = adapter->itr;
2628
2629	if (unlikely(hw->mac_type < e1000_82540))
2630		return;
2631
2632	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2633	if (unlikely(adapter->link_speed != SPEED_1000)) {
2634		current_itr = 0;
2635		new_itr = 4000;
2636		goto set_itr_now;
2637	}
2638
2639	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2640					   adapter->total_tx_packets,
2641					   adapter->total_tx_bytes);
2642	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2643	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2644		adapter->tx_itr = low_latency;
2645
2646	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2647					   adapter->total_rx_packets,
2648					   adapter->total_rx_bytes);
2649	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2650	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2651		adapter->rx_itr = low_latency;
2652
2653	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2654
2655	switch (current_itr) {
2656	/* counts and packets in update_itr are dependent on these numbers */
2657	case lowest_latency:
2658		new_itr = 70000;
2659		break;
2660	case low_latency:
2661		new_itr = 20000; /* aka hwitr = ~200 */
2662		break;
2663	case bulk_latency:
2664		new_itr = 4000;
2665		break;
2666	default:
2667		break;
2668	}
2669
2670set_itr_now:
2671	if (new_itr != adapter->itr) {
2672		/* this attempts to bias the interrupt rate towards Bulk
2673		 * by adding intermediate steps when interrupt rate is
2674		 * increasing
2675		 */
2676		new_itr = new_itr > adapter->itr ?
2677			  min(adapter->itr + (new_itr >> 2), new_itr) :
2678			  new_itr;
2679		adapter->itr = new_itr;
2680		ew32(ITR, 1000000000 / (new_itr * 256));
2681	}
2682}
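/* The ITR register holds the minimum gap between interrupts in 256 ns
 * units, hence the 1000000000 / (new_itr * 256) conversion from
 * interrupts per second: new_itr = 20000 programs 10^9 / (20000 * 256)
 * = 195, matching the "hwitr = ~200" note above.  A sketch of the
 * conversion (helper name is illustrative only):
 */
static inline u32 e1000_itr_to_reg(u32 ints_per_sec)
{
	return 1000000000 / (ints_per_sec * 256);
}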
2683
2684#define E1000_TX_FLAGS_CSUM		0x00000001
2685#define E1000_TX_FLAGS_VLAN		0x00000002
2686#define E1000_TX_FLAGS_TSO		0x00000004
2687#define E1000_TX_FLAGS_IPV4		0x00000008
2688#define E1000_TX_FLAGS_NO_FCS		0x00000010
2689#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2690#define E1000_TX_FLAGS_VLAN_SHIFT	16
2691
2692static int e1000_tso(struct e1000_adapter *adapter,
2693		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2694		     __be16 protocol)
2695{
2696	struct e1000_context_desc *context_desc;
2697	struct e1000_tx_buffer *buffer_info;
2698	unsigned int i;
2699	u32 cmd_length = 0;
2700	u16 ipcse = 0, tucse, mss;
2701	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2702
2703	if (skb_is_gso(skb)) {
2704		int err;
2705
2706		err = skb_cow_head(skb, 0);
2707		if (err < 0)
2708			return err;
2709
2710		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2711		mss = skb_shinfo(skb)->gso_size;
2712		if (protocol == htons(ETH_P_IP)) {
2713			struct iphdr *iph = ip_hdr(skb);
2714			iph->tot_len = 0;
2715			iph->check = 0;
2716			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2717								 iph->daddr, 0,
2718								 IPPROTO_TCP,
2719								 0);
2720			cmd_length = E1000_TXD_CMD_IP;
2721			ipcse = skb_transport_offset(skb) - 1;
2722		} else if (skb_is_gso_v6(skb)) {
2723			tcp_v6_gso_csum_prep(skb);
2724			ipcse = 0;
2725		}
2726		ipcss = skb_network_offset(skb);
2727		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2728		tucss = skb_transport_offset(skb);
2729		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2730		tucse = 0;
2731
2732		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2733			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2734
2735		i = tx_ring->next_to_use;
2736		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2737		buffer_info = &tx_ring->buffer_info[i];
2738
2739		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2740		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2741		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2742		context_desc->upper_setup.tcp_fields.tucss = tucss;
2743		context_desc->upper_setup.tcp_fields.tucso = tucso;
2744		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2745		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2746		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2747		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2748
2749		buffer_info->time_stamp = jiffies;
2750		buffer_info->next_to_watch = i;
2751
2752		if (++i == tx_ring->count)
2753			i = 0;
2754
2755		tx_ring->next_to_use = i;
2756
2757		return true;
2758	}
2759	return false;
2760}
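/* Worked example of the offsets computed above for a plain untagged
 * Ethernet + IPv4 + TCP frame with 20-byte IP and TCP headers: the
 * network header starts at offset 14 and the transport header at 34,
 * so hdr_len = 54, ipcss = 14, ipcse = 33, ipcso = 14 + 10 = 24 (the
 * IP checksum field), tucss = 34 and tucso = 34 + 16 = 50 (the TCP
 * checksum field).
 */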
2761
2762static bool e1000_tx_csum(struct e1000_adapter *adapter,
2763			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2764			  __be16 protocol)
2765{
2766	struct e1000_context_desc *context_desc;
2767	struct e1000_tx_buffer *buffer_info;
2768	unsigned int i;
2769	u8 css;
2770	u32 cmd_len = E1000_TXD_CMD_DEXT;
2771
2772	if (skb->ip_summed != CHECKSUM_PARTIAL)
2773		return false;
2774
2775	switch (protocol) {
2776	case cpu_to_be16(ETH_P_IP):
2777		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2778			cmd_len |= E1000_TXD_CMD_TCP;
2779		break;
2780	case cpu_to_be16(ETH_P_IPV6):
2781		/* XXX not handling all IPV6 headers */
2782		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2783			cmd_len |= E1000_TXD_CMD_TCP;
2784		break;
2785	default:
2786		if (unlikely(net_ratelimit()))
2787			e_warn(drv, "checksum_partial proto=%x!\n",
2788			       skb->protocol);
2789		break;
2790	}
2791
2792	css = skb_checksum_start_offset(skb);
2793
2794	i = tx_ring->next_to_use;
2795	buffer_info = &tx_ring->buffer_info[i];
2796	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2797
2798	context_desc->lower_setup.ip_config = 0;
2799	context_desc->upper_setup.tcp_fields.tucss = css;
2800	context_desc->upper_setup.tcp_fields.tucso =
2801		css + skb->csum_offset;
2802	context_desc->upper_setup.tcp_fields.tucse = 0;
2803	context_desc->tcp_seg_setup.data = 0;
2804	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2805
2806	buffer_info->time_stamp = jiffies;
2807	buffer_info->next_to_watch = i;
2808
2809	if (unlikely(++i == tx_ring->count))
2810		i = 0;
2811
2812	tx_ring->next_to_use = i;
2813
2814	return true;
2815}
2816
2817#define E1000_MAX_TXD_PWR	12
2818#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2819
2820static int e1000_tx_map(struct e1000_adapter *adapter,
2821			struct e1000_tx_ring *tx_ring,
2822			struct sk_buff *skb, unsigned int first,
2823			unsigned int max_per_txd, unsigned int nr_frags,
2824			unsigned int mss)
2825{
2826	struct e1000_hw *hw = &adapter->hw;
2827	struct pci_dev *pdev = adapter->pdev;
2828	struct e1000_tx_buffer *buffer_info;
2829	unsigned int len = skb_headlen(skb);
2830	unsigned int offset = 0, size, count = 0, i;
2831	unsigned int f, bytecount, segs;
2832
2833	i = tx_ring->next_to_use;
2834
2835	while (len) {
2836		buffer_info = &tx_ring->buffer_info[i];
2837		size = min(len, max_per_txd);
2838		/* Workaround for Controller erratum --
2839		 * descriptor for non-tso packet in a linear SKB that follows a
2840		 * tso gets written back prematurely before the data is fully
2841		 * DMA'd to the controller
2842		 */
2843		if (!skb->data_len && tx_ring->last_tx_tso &&
2844		    !skb_is_gso(skb)) {
2845			tx_ring->last_tx_tso = false;
2846			size -= 4;
2847		}
2848
2849		/* Workaround for premature desc write-backs
2850		 * in TSO mode.  Append 4-byte sentinel desc
2851		 */
2852		if (unlikely(mss && !nr_frags && size == len && size > 8))
2853			size -= 4;
2854		/* Workaround for errata 10, which applies to all
2855		 * controllers in PCI-X mode.
2856		 * The fix is to make sure that the first descriptor of a
2857		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2858		 */
2859		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2860			     (size > 2015) && count == 0))
2861			size = 2015;
2862
2863		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2864		 * terminating buffers within evenly-aligned dwords.
2865		 */
2866		if (unlikely(adapter->pcix_82544 &&
2867		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2868		   size > 4))
2869			size -= 4;
2870
2871		buffer_info->length = size;
2872		/* set time_stamp *before* dma to help avoid a possible race */
2873		buffer_info->time_stamp = jiffies;
2874		buffer_info->mapped_as_page = false;
2875		buffer_info->dma = dma_map_single(&pdev->dev,
2876						  skb->data + offset,
2877						  size, DMA_TO_DEVICE);
2878		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2879			goto dma_error;
2880		buffer_info->next_to_watch = i;
2881
2882		len -= size;
2883		offset += size;
2884		count++;
2885		if (len) {
2886			i++;
2887			if (unlikely(i == tx_ring->count))
2888				i = 0;
2889		}
2890	}
2891
2892	for (f = 0; f < nr_frags; f++) {
2893		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2894
2895		len = skb_frag_size(frag);
2896		offset = 0;
2897
2898		while (len) {
2899			unsigned long bufend;
2900			i++;
2901			if (unlikely(i == tx_ring->count))
2902				i = 0;
2903
2904			buffer_info = &tx_ring->buffer_info[i];
2905			size = min(len, max_per_txd);
2906			/* Workaround for premature desc write-backs
2907			 * in TSO mode.  Append 4-byte sentinel desc
2908			 */
2909			if (unlikely(mss && f == (nr_frags-1) &&
2910			    size == len && size > 8))
2911				size -= 4;
2912			/* Workaround for potential 82544 hang in PCI-X.
2913			 * Avoid terminating buffers within evenly-aligned
2914			 * dwords.
2915			 */
2916			bufend = (unsigned long)
2917				page_to_phys(skb_frag_page(frag));
2918			bufend += offset + size - 1;
2919			if (unlikely(adapter->pcix_82544 &&
2920				     !(bufend & 4) &&
2921				     size > 4))
2922				size -= 4;
2923
2924			buffer_info->length = size;
2925			buffer_info->time_stamp = jiffies;
2926			buffer_info->mapped_as_page = true;
2927			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2928						offset, size, DMA_TO_DEVICE);
2929			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2930				goto dma_error;
2931			buffer_info->next_to_watch = i;
2932
2933			len -= size;
2934			offset += size;
2935			count++;
2936		}
2937	}
2938
2939	segs = skb_shinfo(skb)->gso_segs ?: 1;
2940	/* count the headers replicated into each additional TSO segment */
2941	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2942
2943	tx_ring->buffer_info[i].skb = skb;
2944	tx_ring->buffer_info[i].segs = segs;
2945	tx_ring->buffer_info[i].bytecount = bytecount;
2946	tx_ring->buffer_info[first].next_to_watch = i;
2947
2948	return count;
2949
2950dma_error:
2951	dev_err(&pdev->dev, "TX DMA map failed\n");
2952	buffer_info->dma = 0;
2953	if (count)
2954		count--;
2955
2956	while (count--) {
2957		if (i == 0)
2958			i += tx_ring->count;
2959		i--;
2960		buffer_info = &tx_ring->buffer_info[i];
2961		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2962	}
2963
2964	return 0;
2965}
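/* Worked example of the bytecount math above, assuming a TSO skb whose
 * linear area holds only the 54 bytes of headers: with 4200 bytes of
 * payload and mss = 1400, segs = 3 and each wire frame repeats the
 * headers, so bytecount = (3 - 1) * 54 + 4254 = 4362 bytes on the wire.
 */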
2966
2967static void e1000_tx_queue(struct e1000_adapter *adapter,
2968			   struct e1000_tx_ring *tx_ring, int tx_flags,
2969			   int count)
2970{
2971	struct e1000_tx_desc *tx_desc = NULL;
2972	struct e1000_tx_buffer *buffer_info;
2973	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2974	unsigned int i;
2975
2976	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2977		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2978			     E1000_TXD_CMD_TSE;
2979		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2980
2981		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2982			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2983	}
2984
2985	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2986		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2987		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2988	}
2989
2990	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2991		txd_lower |= E1000_TXD_CMD_VLE;
2992		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2993	}
2994
2995	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2996		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2997
2998	i = tx_ring->next_to_use;
2999
3000	while (count--) {
3001		buffer_info = &tx_ring->buffer_info[i];
3002		tx_desc = E1000_TX_DESC(*tx_ring, i);
3003		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3004		tx_desc->lower.data =
3005			cpu_to_le32(txd_lower | buffer_info->length);
3006		tx_desc->upper.data = cpu_to_le32(txd_upper);
3007		if (unlikely(++i == tx_ring->count))
3008			i = 0;
3009	}
3010
3011	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3012
3013	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3014	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3015		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3016
3017	/* Force memory writes to complete before letting h/w
3018	 * know there are new descriptors to fetch.  (Only
3019	 * applicable for weak-ordered memory model archs,
3020	 * such as IA-64).
3021	 */
3022	dma_wmb();
3023
3024	tx_ring->next_to_use = i;
3025}
3026
3027/* 82547 workaround to avoid controller hang in half-duplex environment.
3028 * The workaround is to avoid queuing a large packet that would span
3029 * the internal Tx FIFO ring boundary by notifying the stack to resend
3030 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3031 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3032 * to the beginning of the Tx FIFO.
3033 */
3034
3035#define E1000_FIFO_HDR			0x10
3036#define E1000_82547_PAD_LEN		0x3E0
3037
3038static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3039				       struct sk_buff *skb)
3040{
3041	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3042	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3043
3044	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3045
3046	if (adapter->link_duplex != HALF_DUPLEX)
3047		goto no_fifo_stall_required;
3048
3049	if (atomic_read(&adapter->tx_fifo_stall))
3050		return 1;
3051
3052	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3053		atomic_set(&adapter->tx_fifo_stall, 1);
3054		return 1;
3055	}
3056
3057no_fifo_stall_required:
3058	adapter->tx_fifo_head += skb_fifo_len;
3059	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3060		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3061	return 0;
3062}
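/* Worked example of the FIFO accounting above: a 60-byte frame grows to
 * 60 + E1000_FIFO_HDR = 76 bytes and aligns up to 80.  In half-duplex
 * the send is deferred once that aligned length reaches the remaining
 * FIFO space plus the E1000_82547_PAD_LEN (0x3E0 = 992 byte) guard
 * band; otherwise tx_fifo_head advances by 80 and wraps modulo
 * tx_fifo_size.
 */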
3063
3064static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3065{
3066	struct e1000_adapter *adapter = netdev_priv(netdev);
3067	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3068
3069	netif_stop_queue(netdev);
3070	/* Herbert's original patch had:
3071	 *  smp_mb__after_netif_stop_queue();
3072	 * but since that doesn't exist yet, just open code it.
3073	 */
3074	smp_mb();
3075
3076	/* We need to check again in a case another CPU has just
3077	 * made room available.
3078	 */
3079	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3080		return -EBUSY;
3081
3082	/* A reprieve! */
3083	netif_start_queue(netdev);
3084	++adapter->restart_queue;
3085	return 0;
3086}
3087
3088static int e1000_maybe_stop_tx(struct net_device *netdev,
3089			       struct e1000_tx_ring *tx_ring, int size)
3090{
3091	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3092		return 0;
3093	return __e1000_maybe_stop_tx(netdev, size);
3094}
3095
3096#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
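/* TXD_USE_COUNT() is a ceiling divide by 2^X: with E1000_MAX_TXD_PWR = 12
 * (4096 bytes per descriptor), a 9000-byte linear buffer costs
 * (9000 + 4095) >> 12 = 3 descriptors.
 */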
3097static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3098				    struct net_device *netdev)
3099{
3100	struct e1000_adapter *adapter = netdev_priv(netdev);
3101	struct e1000_hw *hw = &adapter->hw;
3102	struct e1000_tx_ring *tx_ring;
3103	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3104	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3105	unsigned int tx_flags = 0;
3106	unsigned int len = skb_headlen(skb);
3107	unsigned int nr_frags;
3108	unsigned int mss;
3109	int count = 0;
3110	int tso;
3111	unsigned int f;
3112	__be16 protocol = vlan_get_protocol(skb);
3113
3114	/* This goes back to the question of how to logically map a Tx queue
3115	 * to a flow.  Right now, performance is impacted slightly negatively
3116	 * if using multiple Tx queues.  If the stack breaks away from a
3117	 * single qdisc implementation, we can look at this again.
3118	 */
3119	tx_ring = adapter->tx_ring;
3120
3121	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3122	 * packets may get corrupted during padding by HW.
3123	 * To work around this issue, pad all small packets manually.
3124	 */
3125	if (eth_skb_pad(skb))
3126		return NETDEV_TX_OK;
3127
3128	mss = skb_shinfo(skb)->gso_size;
3129	/* The controller does a simple calculation to
3130	 * make sure there is enough room in the FIFO before
3131	 * initiating the DMA for each buffer.  It assumes each
3132	 * buffer spans at most 4 segments, i.e. ceil(buffer len/mss)
3133	 * <= 4, so to make sure we don't overrun the FIFO, cap the
3134	 * max buffer len at 4 * mss when mss drops.
3135	 */
3136	if (mss) {
3137		u8 hdr_len;
3138		max_per_txd = min(mss << 2, max_per_txd);
3139		max_txd_pwr = fls(max_per_txd) - 1;
3140
3141		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3142		if (skb->data_len && hdr_len == len) {
3143			switch (hw->mac_type) {
3144			case e1000_82544: {
3145				unsigned int pull_size;
3146
3147				/* Make sure we have room to chop off 4 bytes,
3148				 * and that the end alignment will work out to
3149				 * this hardware's requirements
3150				 * NOTE: this is a TSO only workaround
3151				 * if end byte alignment not correct move us
3152				 * into the next dword
3153				 */
3154				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3155				    & 4)
3156					break;
3157				pull_size = min((unsigned int)4, skb->data_len);
3158				if (!__pskb_pull_tail(skb, pull_size)) {
3159					e_err(drv, "__pskb_pull_tail "
3160					      "failed.\n");
3161					dev_kfree_skb_any(skb);
3162					return NETDEV_TX_OK;
3163				}
3164				len = skb_headlen(skb);
3165				break;
3166			}
3167			default:
3168				/* do nothing */
3169				break;
3170			}
3171		}
3172	}
3173
3174	/* reserve a descriptor for the offload context */
3175	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3176		count++;
3177	count++;
3178
3179	/* Controller Erratum workaround */
3180	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3181		count++;
3182
3183	count += TXD_USE_COUNT(len, max_txd_pwr);
3184
3185	if (adapter->pcix_82544)
3186		count++;
3187
3188	/* Workaround for errata 10, which applies to all controllers
3189	 * in PCI-X mode, so add one more descriptor to the count
3190	 */
3191	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3192			(len > 2015)))
3193		count++;
3194
3195	nr_frags = skb_shinfo(skb)->nr_frags;
3196	for (f = 0; f < nr_frags; f++)
3197		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3198				       max_txd_pwr);
3199	if (adapter->pcix_82544)
3200		count += nr_frags;
3201
3202	/* need: count + 2 desc gap to keep tail from touching
3203	 * head, otherwise try next time
3204	 */
3205	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3206		return NETDEV_TX_BUSY;
3207
3208	if (unlikely((hw->mac_type == e1000_82547) &&
3209		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3210		netif_stop_queue(netdev);
3211		if (!test_bit(__E1000_DOWN, &adapter->flags))
3212			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3213		return NETDEV_TX_BUSY;
3214	}
3215
3216	if (skb_vlan_tag_present(skb)) {
3217		tx_flags |= E1000_TX_FLAGS_VLAN;
3218		tx_flags |= (skb_vlan_tag_get(skb) <<
3219			     E1000_TX_FLAGS_VLAN_SHIFT);
3220	}
3221
3222	first = tx_ring->next_to_use;
3223
3224	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3225	if (tso < 0) {
3226		dev_kfree_skb_any(skb);
3227		return NETDEV_TX_OK;
3228	}
3229
3230	if (likely(tso)) {
3231		if (likely(hw->mac_type != e1000_82544))
3232			tx_ring->last_tx_tso = true;
3233		tx_flags |= E1000_TX_FLAGS_TSO;
3234	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3235		tx_flags |= E1000_TX_FLAGS_CSUM;
3236
3237	if (protocol == htons(ETH_P_IP))
3238		tx_flags |= E1000_TX_FLAGS_IPV4;
3239
3240	if (unlikely(skb->no_fcs))
3241		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3242
3243	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3244			     nr_frags, mss);
3245
3246	if (count) {
3247		/* More descriptors are needed here than in other Intel
3248		 * drivers due to a number of workarounds.  The breakdown:
3249		 * Data descriptors: MAX_SKB_FRAGS + 1
3250		 * Context Descriptor: 1
3251		 * Keep head from touching tail: 2
3252		 * Workarounds: 3
3253		 */
3254		int desc_needed = MAX_SKB_FRAGS + 7;
3255
3256		netdev_sent_queue(netdev, skb->len);
3257		skb_tx_timestamp(skb);
3258
3259		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3260
3261		/* 82544 potentially requires twice as many data descriptors
3262		 * in order to guarantee buffers don't end on evenly-aligned
3263		 * dwords
3264		 */
3265		if (adapter->pcix_82544)
3266			desc_needed += MAX_SKB_FRAGS + 1;
3267
3268		/* Make sure there is space in the ring for the next send. */
3269		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3270
3271		if (!netdev_xmit_more() ||
3272		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3273			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3274		}
3275	} else {
3276		dev_kfree_skb_any(skb);
3277		tx_ring->buffer_info[first].time_stamp = 0;
3278		tx_ring->next_to_use = first;
3279	}
3280
3281	return NETDEV_TX_OK;
3282}
3283
3284#define NUM_REGS 38 /* 1 based count */
3285static void e1000_regdump(struct e1000_adapter *adapter)
3286{
3287	struct e1000_hw *hw = &adapter->hw;
3288	u32 regs[NUM_REGS];
3289	u32 *regs_buff = regs;
3290	int i = 0;
3291
3292	static const char * const reg_name[] = {
3293		"CTRL",  "STATUS",
3294		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3295		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3296		"TIDV", "TXDCTL", "TADV", "TARC0",
3297		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298		"TXDCTL1", "TARC1",
3299		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3300		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3301		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3302	};
3303
3304	regs_buff[0]  = er32(CTRL);
3305	regs_buff[1]  = er32(STATUS);
3306
3307	regs_buff[2]  = er32(RCTL);
3308	regs_buff[3]  = er32(RDLEN);
3309	regs_buff[4]  = er32(RDH);
3310	regs_buff[5]  = er32(RDT);
3311	regs_buff[6]  = er32(RDTR);
3312
3313	regs_buff[7]  = er32(TCTL);
3314	regs_buff[8]  = er32(TDBAL);
3315	regs_buff[9]  = er32(TDBAH);
3316	regs_buff[10] = er32(TDLEN);
3317	regs_buff[11] = er32(TDH);
3318	regs_buff[12] = er32(TDT);
3319	regs_buff[13] = er32(TIDV);
3320	regs_buff[14] = er32(TXDCTL);
3321	regs_buff[15] = er32(TADV);
3322	regs_buff[16] = er32(TARC0);
3323
3324	regs_buff[17] = er32(TDBAL1);
3325	regs_buff[18] = er32(TDBAH1);
3326	regs_buff[19] = er32(TDLEN1);
3327	regs_buff[20] = er32(TDH1);
3328	regs_buff[21] = er32(TDT1);
3329	regs_buff[22] = er32(TXDCTL1);
3330	regs_buff[23] = er32(TARC1);
3331	regs_buff[24] = er32(CTRL_EXT);
3332	regs_buff[25] = er32(ERT);
3333	regs_buff[26] = er32(RDBAL0);
3334	regs_buff[27] = er32(RDBAH0);
3335	regs_buff[28] = er32(TDFH);
3336	regs_buff[29] = er32(TDFT);
3337	regs_buff[30] = er32(TDFHS);
3338	regs_buff[31] = er32(TDFTS);
3339	regs_buff[32] = er32(TDFPC);
3340	regs_buff[33] = er32(RDFH);
3341	regs_buff[34] = er32(RDFT);
3342	regs_buff[35] = er32(RDFHS);
3343	regs_buff[36] = er32(RDFTS);
3344	regs_buff[37] = er32(RDFPC);
3345
3346	pr_info("Register dump\n");
3347	for (i = 0; i < NUM_REGS; i++)
3348		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3349}
3350
3351/*
3352 * e1000_dump: Print registers, tx ring and rx ring
3353 */
3354static void e1000_dump(struct e1000_adapter *adapter)
3355{
3356	/* this code doesn't handle multiple rings */
3357	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3358	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3359	int i;
3360
3361	if (!netif_msg_hw(adapter))
3362		return;
3363
3364	/* Print Registers */
3365	e1000_regdump(adapter);
3366
3367	/* transmit dump */
3368	pr_info("TX Desc ring0 dump\n");
3369
3370	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3371	 *
3372	 * Legacy Transmit Descriptor
3373	 *   +--------------------------------------------------------------+
3374	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3375	 *   +--------------------------------------------------------------+
3376	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3377	 *   +--------------------------------------------------------------+
3378	 *   63       48 47        36 35    32 31     24 23    16 15        0
3379	 *
3380	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3381	 *   63      48 47    40 39       32 31             16 15    8 7      0
3382	 *   +----------------------------------------------------------------+
3383	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3384	 *   +----------------------------------------------------------------+
3385	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3386	 *   +----------------------------------------------------------------+
3387	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3388	 *
3389	 * Extended Data Descriptor (DTYP=0x1)
3390	 *   +----------------------------------------------------------------+
3391	 * 0 |                     Buffer Address [63:0]                      |
3392	 *   +----------------------------------------------------------------+
3393	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3394	 *   +----------------------------------------------------------------+
3395	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3396	 */
3397	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3398	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399
3400	if (!netif_msg_tx_done(adapter))
3401		goto rx_ring_summary;
3402
3403	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3404		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3405		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3406		struct my_u { __le64 a; __le64 b; };
3407		struct my_u *u = (struct my_u *)tx_desc;
3408		const char *type;
3409
3410		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3411			type = "NTC/U";
3412		else if (i == tx_ring->next_to_use)
3413			type = "NTU";
3414		else if (i == tx_ring->next_to_clean)
3415			type = "NTC";
3416		else
3417			type = "";
3418
3419		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3420			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3421			le64_to_cpu(u->a), le64_to_cpu(u->b),
3422			(u64)buffer_info->dma, buffer_info->length,
3423			buffer_info->next_to_watch,
3424			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3425	}
3426
3427rx_ring_summary:
3428	/* receive dump */
3429	pr_info("\nRX Desc ring dump\n");
3430
3431	/* Legacy Receive Descriptor Format
3432	 *
3433	 * +-----------------------------------------------------+
3434	 * |                Buffer Address [63:0]                |
3435	 * +-----------------------------------------------------+
3436	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3437	 * +-----------------------------------------------------+
3438	 * 63       48 47    40 39      32 31         16 15      0
3439	 */
3440	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3441
3442	if (!netif_msg_rx_status(adapter))
3443		goto exit;
3444
3445	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3446		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3447		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3448		struct my_u { __le64 a; __le64 b; };
3449		struct my_u *u = (struct my_u *)rx_desc;
3450		const char *type;
3451
3452		if (i == rx_ring->next_to_use)
3453			type = "NTU";
3454		else if (i == rx_ring->next_to_clean)
3455			type = "NTC";
3456		else
3457			type = "";
3458
3459		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3460			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3461			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3462	} /* for */
3463
3464	/* dump the descriptor caches */
3465	/* rx */
3466	pr_info("Rx descriptor cache in 64bit format\n");
3467	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3468		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469			i,
3470			readl(adapter->hw.hw_addr + i+4),
3471			readl(adapter->hw.hw_addr + i),
3472			readl(adapter->hw.hw_addr + i+12),
3473			readl(adapter->hw.hw_addr + i+8));
3474	}
3475	/* tx */
3476	pr_info("Tx descriptor cache in 64bit format\n");
3477	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3478		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479			i,
3480			readl(adapter->hw.hw_addr + i+4),
3481			readl(adapter->hw.hw_addr + i),
3482			readl(adapter->hw.hw_addr + i+12),
3483			readl(adapter->hw.hw_addr + i+8));
3484	}
3485exit:
3486	return;
3487}
3488
3489/**
3490 * e1000_tx_timeout - Respond to a Tx Hang
3491 * @netdev: network interface device structure
 * @txqueue: number of the Tx queue that hung (unused)
3492 **/
3493static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3494{
3495	struct e1000_adapter *adapter = netdev_priv(netdev);
3496
3497	/* Do the reset outside of interrupt context */
3498	adapter->tx_timeout_count++;
3499	schedule_work(&adapter->reset_task);
3500}
3501
3502static void e1000_reset_task(struct work_struct *work)
3503{
3504	struct e1000_adapter *adapter =
3505		container_of(work, struct e1000_adapter, reset_task);
3506
3507	e_err(drv, "Reset adapter\n");
3508	e1000_reinit_locked(adapter);
3509}
3510
3511/**
3512 * e1000_change_mtu - Change the Maximum Transfer Unit
3513 * @netdev: network interface device structure
3514 * @new_mtu: new value for maximum frame size
3515 *
3516 * Returns 0 on success, negative on failure
3517 **/
3518static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3519{
3520	struct e1000_adapter *adapter = netdev_priv(netdev);
3521	struct e1000_hw *hw = &adapter->hw;
3522	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3523
3524	/* Adapter-specific max frame size limits. */
3525	switch (hw->mac_type) {
3526	case e1000_undefined ... e1000_82542_rev2_1:
3527		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3528			e_err(probe, "Jumbo Frames not supported.\n");
3529			return -EINVAL;
3530		}
3531		break;
3532	default:
3533		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3534		break;
3535	}
3536
3537	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3538		msleep(1);
3539	/* e1000_down has a dependency on max_frame_size */
3540	hw->max_frame_size = max_frame;
3541	if (netif_running(netdev)) {
3542		/* prevent buffers from being reallocated */
3543		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3544		e1000_down(adapter);
3545	}
3546
3547	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3548	 * means we reserve 2 more, this pushes us to allocate from the next
3549	 * larger slab size.
3550	 * i.e. RXBUFFER_2048 --> size-4096 slab
3551	 * however with the new *_jumbo_rx* routines, jumbo receives will use
3552	 * fragmented skbs
3553	 */
3554
3555	if (max_frame <= E1000_RXBUFFER_2048)
3556		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3557	else
3558#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3559		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3560#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3561		adapter->rx_buffer_len = PAGE_SIZE;
3562#endif
3563
3564	/* adjust allocation if LPE protects us, and we aren't using SBP */
3565	if (!hw->tbi_compatibility_on &&
3566	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3567	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3568		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3569
3570	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3571		   netdev->mtu, new_mtu);
3572	netdev->mtu = new_mtu;
3573
3574	if (netif_running(netdev))
3575		e1000_up(adapter);
3576	else
3577		e1000_reset(adapter);
3578
3579	clear_bit(__E1000_RESETTING, &adapter->flags);
3580
3581	return 0;
3582}
3583
3584/**
3585 * e1000_update_stats - Update the board statistics counters
3586 * @adapter: board private structure
3587 **/
3588void e1000_update_stats(struct e1000_adapter *adapter)
3589{
3590	struct net_device *netdev = adapter->netdev;
3591	struct e1000_hw *hw = &adapter->hw;
3592	struct pci_dev *pdev = adapter->pdev;
3593	unsigned long flags;
3594	u16 phy_tmp;
3595
3596#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3597
3598	/* Prevent stats update while adapter is being reset, or if the pci
3599	 * connection is down.
3600	 */
3601	if (adapter->link_speed == 0)
3602		return;
3603	if (pci_channel_offline(pdev))
3604		return;
3605
3606	spin_lock_irqsave(&adapter->stats_lock, flags);
3607
3608	/* these counters are modified from e1000_tbi_adjust_stats,
3609	 * called from the interrupt context, so they must only
3610	 * be written while holding adapter->stats_lock
3611	 */
3612
3613	adapter->stats.crcerrs += er32(CRCERRS);
3614	adapter->stats.gprc += er32(GPRC);
3615	adapter->stats.gorcl += er32(GORCL);
3616	adapter->stats.gorch += er32(GORCH);
3617	adapter->stats.bprc += er32(BPRC);
3618	adapter->stats.mprc += er32(MPRC);
3619	adapter->stats.roc += er32(ROC);
3620
3621	adapter->stats.prc64 += er32(PRC64);
3622	adapter->stats.prc127 += er32(PRC127);
3623	adapter->stats.prc255 += er32(PRC255);
3624	adapter->stats.prc511 += er32(PRC511);
3625	adapter->stats.prc1023 += er32(PRC1023);
3626	adapter->stats.prc1522 += er32(PRC1522);
3627
3628	adapter->stats.symerrs += er32(SYMERRS);
3629	adapter->stats.mpc += er32(MPC);
3630	adapter->stats.scc += er32(SCC);
3631	adapter->stats.ecol += er32(ECOL);
3632	adapter->stats.mcc += er32(MCC);
3633	adapter->stats.latecol += er32(LATECOL);
3634	adapter->stats.dc += er32(DC);
3635	adapter->stats.sec += er32(SEC);
3636	adapter->stats.rlec += er32(RLEC);
3637	adapter->stats.xonrxc += er32(XONRXC);
3638	adapter->stats.xontxc += er32(XONTXC);
3639	adapter->stats.xoffrxc += er32(XOFFRXC);
3640	adapter->stats.xofftxc += er32(XOFFTXC);
3641	adapter->stats.fcruc += er32(FCRUC);
3642	adapter->stats.gptc += er32(GPTC);
3643	adapter->stats.gotcl += er32(GOTCL);
3644	adapter->stats.gotch += er32(GOTCH);
3645	adapter->stats.rnbc += er32(RNBC);
3646	adapter->stats.ruc += er32(RUC);
3647	adapter->stats.rfc += er32(RFC);
3648	adapter->stats.rjc += er32(RJC);
3649	adapter->stats.torl += er32(TORL);
3650	adapter->stats.torh += er32(TORH);
3651	adapter->stats.totl += er32(TOTL);
3652	adapter->stats.toth += er32(TOTH);
3653	adapter->stats.tpr += er32(TPR);
3654
3655	adapter->stats.ptc64 += er32(PTC64);
3656	adapter->stats.ptc127 += er32(PTC127);
3657	adapter->stats.ptc255 += er32(PTC255);
3658	adapter->stats.ptc511 += er32(PTC511);
3659	adapter->stats.ptc1023 += er32(PTC1023);
3660	adapter->stats.ptc1522 += er32(PTC1522);
3661
3662	adapter->stats.mptc += er32(MPTC);
3663	adapter->stats.bptc += er32(BPTC);
3664
3665	/* used for adaptive IFS */
3666
3667	hw->tx_packet_delta = er32(TPT);
3668	adapter->stats.tpt += hw->tx_packet_delta;
3669	hw->collision_delta = er32(COLC);
3670	adapter->stats.colc += hw->collision_delta;
3671
3672	if (hw->mac_type >= e1000_82543) {
3673		adapter->stats.algnerrc += er32(ALGNERRC);
3674		adapter->stats.rxerrc += er32(RXERRC);
3675		adapter->stats.tncrs += er32(TNCRS);
3676		adapter->stats.cexterr += er32(CEXTERR);
3677		adapter->stats.tsctc += er32(TSCTC);
3678		adapter->stats.tsctfc += er32(TSCTFC);
3679	}
3680
3681	/* Fill out the OS statistics structure */
3682	netdev->stats.multicast = adapter->stats.mprc;
3683	netdev->stats.collisions = adapter->stats.colc;
3684
3685	/* Rx Errors */
3686
3687	/* RLEC on some newer hardware can be incorrect so build
3688	 * our own version based on RUC and ROC
3689	 */
3690	netdev->stats.rx_errors = adapter->stats.rxerrc +
3691		adapter->stats.crcerrs + adapter->stats.algnerrc +
3692		adapter->stats.ruc + adapter->stats.roc +
3693		adapter->stats.cexterr;
3694	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3695	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3696	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3697	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3698	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3699
3700	/* Tx Errors */
3701	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3702	netdev->stats.tx_errors = adapter->stats.txerrc;
3703	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3704	netdev->stats.tx_window_errors = adapter->stats.latecol;
3705	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3706	if (hw->bad_tx_carr_stats_fd &&
3707	    adapter->link_duplex == FULL_DUPLEX) {
3708		netdev->stats.tx_carrier_errors = 0;
3709		adapter->stats.tncrs = 0;
3710	}
3711
3712	/* Tx Dropped needs to be maintained elsewhere */
3713
3714	/* Phy Stats */
3715	if (hw->media_type == e1000_media_type_copper) {
3716		if ((adapter->link_speed == SPEED_1000) &&
3717		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3718			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3719			adapter->phy_stats.idle_errors += phy_tmp;
3720		}
3721
3722		if ((hw->mac_type <= e1000_82546) &&
3723		   (hw->phy_type == e1000_phy_m88) &&
3724		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3725			adapter->phy_stats.receive_errors += phy_tmp;
3726	}
3727
3728	/* Management Stats */
3729	if (hw->has_smbus) {
3730		adapter->stats.mgptc += er32(MGTPTC);
3731		adapter->stats.mgprc += er32(MGTPRC);
3732		adapter->stats.mgpdc += er32(MGTPDC);
3733	}
3734
3735	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3736}
3737
3738/**
3739 * e1000_intr - Interrupt Handler
3740 * @irq: interrupt number
3741 * @data: pointer to a network interface device structure
3742 **/
3743static irqreturn_t e1000_intr(int irq, void *data)
3744{
3745	struct net_device *netdev = data;
3746	struct e1000_adapter *adapter = netdev_priv(netdev);
3747	struct e1000_hw *hw = &adapter->hw;
3748	u32 icr = er32(ICR);
3749
3750	if (unlikely(!icr))
3751		return IRQ_NONE;  /* Not our interrupt */
3752
3753	/* we might have caused the interrupt, but the above
3754	 * read cleared it, and just in case the driver is
3755	 * down there is nothing to do so return handled
3756	 */
3757	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3758		return IRQ_HANDLED;
3759
3760	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3761		hw->get_link_status = 1;
3762		/* guard against interrupt when we're going down */
3763		if (!test_bit(__E1000_DOWN, &adapter->flags))
3764			schedule_delayed_work(&adapter->watchdog_task, 1);
3765	}
3766
3767	/* disable interrupts, without the synchronize_irq bit */
3768	ew32(IMC, ~0);
3769	E1000_WRITE_FLUSH();
3770
3771	if (likely(napi_schedule_prep(&adapter->napi))) {
3772		adapter->total_tx_bytes = 0;
3773		adapter->total_tx_packets = 0;
3774		adapter->total_rx_bytes = 0;
3775		adapter->total_rx_packets = 0;
3776		__napi_schedule(&adapter->napi);
3777	} else {
3778		/* this really should not happen! if it does it is basically a
3779		 * bug, but not a hard error, so enable ints and continue
3780		 */
3781		if (!test_bit(__E1000_DOWN, &adapter->flags))
3782			e1000_irq_enable(adapter);
3783	}
3784
3785	return IRQ_HANDLED;
3786}
3787
3788/**
3789 * e1000_clean - NAPI Rx polling callback
3790 * @napi: napi struct containing references to driver info
 * @budget: amount of work driver is allowed to do this pass, in packets
3791 **/
3792static int e1000_clean(struct napi_struct *napi, int budget)
3793{
3794	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3795						     napi);
3796	int tx_clean_complete = 0, work_done = 0;
3797
3798	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3799
3800	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3801
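	/* Returning the full budget tells the NAPI core to keep polling;
	 * interrupts stay masked and this handler will be invoked again.
	 */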
3802	if (!tx_clean_complete || work_done == budget)
3803		return budget;
3804
3805	/* Exit the polling mode, but don't re-enable interrupts if stack might
3806	 * poll us due to busy-polling
3807	 */
3808	if (likely(napi_complete_done(napi, work_done))) {
3809		if (likely(adapter->itr_setting & 3))
3810			e1000_set_itr(adapter);
3811		if (!test_bit(__E1000_DOWN, &adapter->flags))
3812			e1000_irq_enable(adapter);
3813	}
3814
3815	return work_done;
3816}
3817
3818/**
3819 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3820 * @adapter: board private structure
 * @tx_ring: ring to clean
3821 **/
3822static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3823			       struct e1000_tx_ring *tx_ring)
3824{
3825	struct e1000_hw *hw = &adapter->hw;
3826	struct net_device *netdev = adapter->netdev;
3827	struct e1000_tx_desc *tx_desc, *eop_desc;
3828	struct e1000_tx_buffer *buffer_info;
3829	unsigned int i, eop;
3830	unsigned int count = 0;
3831	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3832	unsigned int bytes_compl = 0, pkts_compl = 0;
3833
3834	i = tx_ring->next_to_clean;
3835	eop = tx_ring->buffer_info[i].next_to_watch;
3836	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3837
3838	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3839	       (count < tx_ring->count)) {
3840		bool cleaned = false;
3841		dma_rmb();	/* read buffer_info after eop_desc */
3842		for ( ; !cleaned; count++) {
3843			tx_desc = E1000_TX_DESC(*tx_ring, i);
3844			buffer_info = &tx_ring->buffer_info[i];
3845			cleaned = (i == eop);
3846
3847			if (cleaned) {
3848				total_tx_packets += buffer_info->segs;
3849				total_tx_bytes += buffer_info->bytecount;
3850				if (buffer_info->skb) {
3851					bytes_compl += buffer_info->skb->len;
3852					pkts_compl++;
3853				}
3854
3855			}
3856			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3857			tx_desc->upper.data = 0;
3858
3859			if (unlikely(++i == tx_ring->count))
3860				i = 0;
3861		}
3862
3863		eop = tx_ring->buffer_info[i].next_to_watch;
3864		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3865	}
3866
3867	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3868	 * which will reuse the cleaned buffers.
3869	 */
3870	smp_store_release(&tx_ring->next_to_clean, i);
3871
3872	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3873
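	/* Wake the queue only once a comfortable number of descriptors is
	 * free again, so it does not flip between stopped and started on
	 * every reclaimed packet.
	 */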
3874#define TX_WAKE_THRESHOLD 32
3875	if (unlikely(count && netif_carrier_ok(netdev) &&
3876		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3877		/* Make sure that anybody stopping the queue after this
3878		 * sees the new next_to_clean.
3879		 */
3880		smp_mb();
3881
3882		if (netif_queue_stopped(netdev) &&
3883		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3884			netif_wake_queue(netdev);
3885			++adapter->restart_queue;
3886		}
3887	}
3888
3889	if (adapter->detect_tx_hung) {
3890		/* Detect a transmit hang in hardware, this serializes the
3891		 * check with the clearing of time_stamp and movement of i
3892		 */
3893		adapter->detect_tx_hung = false;
3894		if (tx_ring->buffer_info[eop].time_stamp &&
3895		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3896			       (adapter->tx_timeout_factor * HZ)) &&
3897		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3898
3899			/* detected Tx unit hang */
3900			e_err(drv, "Detected Tx Unit Hang\n"
3901			      "  Tx Queue             <%lu>\n"
3902			      "  TDH                  <%x>\n"
3903			      "  TDT                  <%x>\n"
3904			      "  next_to_use          <%x>\n"
3905			      "  next_to_clean        <%x>\n"
3906			      "buffer_info[next_to_clean]\n"
3907			      "  time_stamp           <%lx>\n"
3908			      "  next_to_watch        <%x>\n"
3909			      "  jiffies              <%lx>\n"
3910			      "  next_to_watch.status <%x>\n",
3911				(unsigned long)(tx_ring - adapter->tx_ring),
3912				readl(hw->hw_addr + tx_ring->tdh),
3913				readl(hw->hw_addr + tx_ring->tdt),
3914				tx_ring->next_to_use,
3915				tx_ring->next_to_clean,
3916				tx_ring->buffer_info[eop].time_stamp,
3917				eop,
3918				jiffies,
3919				eop_desc->upper.fields.status);
3920			e1000_dump(adapter);
3921			netif_stop_queue(netdev);
3922		}
3923	}
3924	adapter->total_tx_bytes += total_tx_bytes;
3925	adapter->total_tx_packets += total_tx_packets;
3926	netdev->stats.tx_bytes += total_tx_bytes;
3927	netdev->stats.tx_packets += total_tx_packets;
3928	return count < tx_ring->count;
3929}
3930
3931/**
3932 * e1000_rx_checksum - Receive Checksum Offload for 82543
3933 * @adapter:     board private structure
3934 * @status_err:  receive descriptor status and error fields
3935 * @csum:        receive descriptor csum field
3936 * @sk_buff:     socket buffer with received data
3937 **/
3938static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3939			      u32 csum, struct sk_buff *skb)
3940{
3941	struct e1000_hw *hw = &adapter->hw;
3942	u16 status = (u16)status_err;
3943	u8 errors = (u8)(status_err >> 24);
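	/* Callers pack the fields as status | (errors << 24); e.g.
	 * (illustrative) status 0x03 (DD|EOP) with errors 0x20 (TCPE)
	 * arrives here as status_err 0x20000003.
	 */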
3944
3945	skb_checksum_none_assert(skb);
3946
3947	/* 82543 or newer only */
3948	if (unlikely(hw->mac_type < e1000_82543))
3949		return;
3950	/* Ignore Checksum bit is set */
3951	if (unlikely(status & E1000_RXD_STAT_IXSM))
3952		return;
3953	/* TCP/UDP checksum error bit is set */
3954	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3955		/* let the stack verify checksum errors */
3956		adapter->hw_csum_err++;
3957		return;
3958	}
3959	/* TCP/UDP Checksum has not been calculated */
3960	if (!(status & E1000_RXD_STAT_TCPCS))
3961		return;
3962
3963	/* It must be a TCP or UDP packet with a valid checksum */
3964	skb->ip_summed = CHECKSUM_UNNECESSARY;
3968	adapter->hw_csum_good++;
3969}
3970
3971/**
3972 * e1000_consume_page - helper function for jumbo Rx path
 * @bi: software descriptor shadow data
 * @skb: skb being modified
 * @length: length of data being added
3973 **/
3974static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3975			       u16 length)
3976{
3977	bi->rxbuf.page = NULL;
3978	skb->len += length;
3979	skb->data_len += length;
3980	skb->truesize += PAGE_SIZE;
3981}
3982
3983/**
3984 * e1000_receive_skb - helper function to handle rx indications
3985 * @adapter: board private structure
3986 * @status: descriptor status field as written by hardware
3987 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3988 * @skb: pointer to sk_buff to be indicated to stack
3989 */
3990static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3991			      __le16 vlan, struct sk_buff *skb)
3992{
3993	skb->protocol = eth_type_trans(skb, adapter->netdev);
3994
3995	if (status & E1000_RXD_STAT_VP) {
3996		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3997
3998		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3999	}
4000	napi_gro_receive(&adapter->napi, skb);
4001}
4002
4003/**
4004 * e1000_tbi_adjust_stats
4005 * @hw: Struct containing variables accessed by shared code
 * @stats: pointer to the stats struct to adjust
4006 * @frame_len: The length of the frame in question
4007 * @mac_addr: The Ethernet destination address of the frame in question
4008 *
4009 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4010 */
4011static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4012				   struct e1000_hw_stats *stats,
4013				   u32 frame_len, const u8 *mac_addr)
4014{
4015	u64 carry_bit;
4016
4017	/* First adjust the frame length. */
4018	frame_len--;
4019	/* We need to adjust the statistics counters, since the hardware
4020	 * counters overcount this packet as a CRC error and undercount
4021	 * the packet as a good packet
4022	 */
4023	/* This packet should not be counted as a CRC error. */
4024	stats->crcerrs--;
4025	/* This packet does count as a Good Packet Received. */
4026	stats->gprc++;
4027
4028	/* Adjust the Good Octets received counters */
4029	carry_bit = 0x80000000 & stats->gorcl;
4030	stats->gorcl += frame_len;
4031	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4032	 * Received Count) was one before the addition,
4033	 * AND it is zero after, then we lost the carry out,
4034	 * need to add one to Gorch (Good Octets Received Count High).
4035	 * This could be simplified if all environments supported
4036	 * 64-bit integers.
4037	 */
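	/* Worked example (hypothetical values): gorcl = 0xFFFFFFF0 and
	 * frame_len = 0x20: carry_bit = 0x80000000 and gorcl wraps to
	 * 0x00000010, so the lost carry is added back into gorch.
	 */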
4038	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4039		stats->gorch++;
4040	/* Is this a broadcast or multicast?  Check broadcast first,
4041	 * since the test for a multicast frame will test positive on
4042	 * a broadcast frame.
4043	 */
4044	if (is_broadcast_ether_addr(mac_addr))
4045		stats->bprc++;
4046	else if (is_multicast_ether_addr(mac_addr))
4047		stats->mprc++;
4048
4049	if (frame_len == hw->max_frame_size) {
4050		/* In this case, the hardware has overcounted the number of
4051		 * oversize frames.
4052		 */
4053		if (stats->roc > 0)
4054			stats->roc--;
4055	}
4056
4057	/* Adjust the bin counters when the extra byte put the frame in the
4058	 * wrong bin. Remember that the frame_len was adjusted above.
4059	 */
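	/* e.g. (hypothetical) a frame the hardware saw as 65 bytes was
	 * binned into prc127; its corrected length is 64, so it is moved
	 * into prc64 below.
	 */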
4060	if (frame_len == 64) {
4061		stats->prc64++;
4062		stats->prc127--;
4063	} else if (frame_len == 127) {
4064		stats->prc127++;
4065		stats->prc255--;
4066	} else if (frame_len == 255) {
4067		stats->prc255++;
4068		stats->prc511--;
4069	} else if (frame_len == 511) {
4070		stats->prc511++;
4071		stats->prc1023--;
4072	} else if (frame_len == 1023) {
4073		stats->prc1023++;
4074		stats->prc1522--;
4075	} else if (frame_len == 1522) {
4076		stats->prc1522++;
4077	}
4078}
4079
4080static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4081				    u8 status, u8 errors,
4082				    u32 length, const u8 *data)
4083{
4084	struct e1000_hw *hw = &adapter->hw;
4085	u8 last_byte = *(data + length - 1);
4086
4087	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4088		unsigned long irq_flags;
4089
4090		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4091		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4092		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4093
4094		return true;
4095	}
4096
4097	return false;
4098}
4099
4100static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4101					  unsigned int bufsz)
4102{
4103	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4104
4105	if (unlikely(!skb))
4106		adapter->alloc_rx_buff_failed++;
4107	return skb;
4108}
4109
4110/**
4111 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4112 * @adapter: board private structure
4113 * @rx_ring: ring to clean
4114 * @work_done: amount of napi work completed this call
4115 * @work_to_do: max amount of work allowed for this call to do
4116 *
4117 * the return value indicates whether actual cleaning was done, there
4118 * is no guarantee that everything was cleaned
4119 */
4120static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4121				     struct e1000_rx_ring *rx_ring,
4122				     int *work_done, int work_to_do)
4123{
4124	struct net_device *netdev = adapter->netdev;
4125	struct pci_dev *pdev = adapter->pdev;
4126	struct e1000_rx_desc *rx_desc, *next_rxd;
4127	struct e1000_rx_buffer *buffer_info, *next_buffer;
4128	u32 length;
4129	unsigned int i;
4130	int cleaned_count = 0;
4131	bool cleaned = false;
4132	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4133
4134	i = rx_ring->next_to_clean;
4135	rx_desc = E1000_RX_DESC(*rx_ring, i);
4136	buffer_info = &rx_ring->buffer_info[i];
4137
4138	while (rx_desc->status & E1000_RXD_STAT_DD) {
4139		struct sk_buff *skb;
4140		u8 status;
4141
4142		if (*work_done >= work_to_do)
4143			break;
4144		(*work_done)++;
4145		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4146
4147		status = rx_desc->status;
4148
4149		if (++i == rx_ring->count)
4150			i = 0;
4151
4152		next_rxd = E1000_RX_DESC(*rx_ring, i);
4153		prefetch(next_rxd);
4154
4155		next_buffer = &rx_ring->buffer_info[i];
4156
4157		cleaned = true;
4158		cleaned_count++;
4159		dma_unmap_page(&pdev->dev, buffer_info->dma,
4160			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4161		buffer_info->dma = 0;
4162
4163		length = le16_to_cpu(rx_desc->length);
4164
4165		/* errors is only valid for DD + EOP descriptors */
4166		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4167		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4168			u8 *mapped = page_address(buffer_info->rxbuf.page);
4169
4170			if (e1000_tbi_should_accept(adapter, status,
4171						    rx_desc->errors,
4172						    length, mapped)) {
4173				length--;
4174			} else if (netdev->features & NETIF_F_RXALL) {
4175				goto process_skb;
4176			} else {
4177				/* an error means any chain goes out the window
4178				 * too
4179				 */
4180				dev_kfree_skb(rx_ring->rx_skb_top);
4181				rx_ring->rx_skb_top = NULL;
4182				goto next_desc;
4183			}
4184		}
4185
4186#define rxtop rx_ring->rx_skb_top
4187process_skb:
4188		if (!(status & E1000_RXD_STAT_EOP)) {
4189			/* this descriptor is only the beginning (or middle) */
4190			if (!rxtop) {
4191				/* this is the beginning of a chain */
4192				rxtop = napi_get_frags(&adapter->napi);
4193				if (!rxtop)
4194					break;
4195
4196				skb_fill_page_desc(rxtop, 0,
4197						   buffer_info->rxbuf.page,
4198						   0, length);
4199			} else {
4200				/* this is the middle of a chain */
4201				skb_fill_page_desc(rxtop,
4202				    skb_shinfo(rxtop)->nr_frags,
4203				    buffer_info->rxbuf.page, 0, length);
4204			}
4205			e1000_consume_page(buffer_info, rxtop, length);
4206			goto next_desc;
4207		} else {
4208			if (rxtop) {
4209				/* end of the chain */
4210				skb_fill_page_desc(rxtop,
4211				    skb_shinfo(rxtop)->nr_frags,
4212				    buffer_info->rxbuf.page, 0, length);
4213				skb = rxtop;
4214				rxtop = NULL;
4215				e1000_consume_page(buffer_info, skb, length);
4216			} else {
4217				struct page *p;
4218				/* no chain, got EOP, this buf is the packet
4219				 * copybreak to save the put_page/alloc_page
4220				 */
4221				p = buffer_info->rxbuf.page;
4222				if (length <= copybreak) {
4223					u8 *vaddr;
4224
4225					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4226						length -= 4;
4227					skb = e1000_alloc_rx_skb(adapter,
4228								 length);
4229					if (!skb)
4230						break;
4231
4232					vaddr = kmap_atomic(p);
4233					memcpy(skb_tail_pointer(skb), vaddr,
4234					       length);
4235					kunmap_atomic(vaddr);
4236					/* re-use the page, so don't erase
4237					 * buffer_info->rxbuf.page
4238					 */
4239					skb_put(skb, length);
4240					e1000_rx_checksum(adapter,
4241							  status | rx_desc->errors << 24,
4242							  le16_to_cpu(rx_desc->csum), skb);
4243
4244					total_rx_bytes += skb->len;
4245					total_rx_packets++;
4246
4247					e1000_receive_skb(adapter, status,
4248							  rx_desc->special, skb);
4249					goto next_desc;
4250				} else {
4251					skb = napi_get_frags(&adapter->napi);
4252					if (!skb) {
4253						adapter->alloc_rx_buff_failed++;
4254						break;
4255					}
4256					skb_fill_page_desc(skb, 0, p, 0,
4257							   length);
4258					e1000_consume_page(buffer_info, skb,
4259							   length);
4260				}
4261			}
4262		}
4263
4264		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4265		e1000_rx_checksum(adapter,
4266				  (u32)(status) |
4267				  ((u32)(rx_desc->errors) << 24),
4268				  le16_to_cpu(rx_desc->csum), skb);
4269
4270		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4271		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4272			pskb_trim(skb, skb->len - 4);
4273		total_rx_packets++;
4274
4275		if (status & E1000_RXD_STAT_VP) {
4276			__le16 vlan = rx_desc->special;
4277			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4278
4279			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4280		}
4281
4282		napi_gro_frags(&adapter->napi);
4283
4284next_desc:
4285		rx_desc->status = 0;
4286
4287		/* return some buffers to hardware, one at a time is too slow */
4288		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4289			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4290			cleaned_count = 0;
4291		}
4292
4293		/* use prefetched values */
4294		rx_desc = next_rxd;
4295		buffer_info = next_buffer;
4296	}
4297	rx_ring->next_to_clean = i;
4298
4299	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4300	if (cleaned_count)
4301		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4302
4303	adapter->total_rx_packets += total_rx_packets;
4304	adapter->total_rx_bytes += total_rx_bytes;
4305	netdev->stats.rx_bytes += total_rx_bytes;
4306	netdev->stats.rx_packets += total_rx_packets;
4307	return cleaned;
4308}
4309
4310/* this should improve performance for small packets with large amounts
4311 * of reassembly being done in the stack
4312 */
4313static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4314				       struct e1000_rx_buffer *buffer_info,
4315				       u32 length, const void *data)
4316{
4317	struct sk_buff *skb;
4318
4319	if (length > copybreak)
4320		return NULL;
4321
4322	skb = e1000_alloc_rx_skb(adapter, length);
4323	if (!skb)
4324		return NULL;
4325
4326	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4327				length, DMA_FROM_DEVICE);
4328
4329	skb_put_data(skb, data, length);
4330
4331	return skb;
4332}
4333
4334/**
4335 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4336 * @adapter: board private structure
4337 * @rx_ring: ring to clean
4338 * @work_done: amount of napi work completed this call
4339 * @work_to_do: max amount of work allowed for this call to do
4340 */
4341static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4342			       struct e1000_rx_ring *rx_ring,
4343			       int *work_done, int work_to_do)
4344{
4345	struct net_device *netdev = adapter->netdev;
4346	struct pci_dev *pdev = adapter->pdev;
4347	struct e1000_rx_desc *rx_desc, *next_rxd;
4348	struct e1000_rx_buffer *buffer_info, *next_buffer;
4349	u32 length;
4350	unsigned int i;
4351	int cleaned_count = 0;
4352	bool cleaned = false;
4353	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4354
4355	i = rx_ring->next_to_clean;
4356	rx_desc = E1000_RX_DESC(*rx_ring, i);
4357	buffer_info = &rx_ring->buffer_info[i];
4358
4359	while (rx_desc->status & E1000_RXD_STAT_DD) {
4360		struct sk_buff *skb;
4361		u8 *data;
4362		u8 status;
4363
4364		if (*work_done >= work_to_do)
4365			break;
4366		(*work_done)++;
4367		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4368
4369		status = rx_desc->status;
4370		length = le16_to_cpu(rx_desc->length);
4371
4372		data = buffer_info->rxbuf.data;
4373		prefetch(data);
4374		skb = e1000_copybreak(adapter, buffer_info, length, data);
4375		if (!skb) {
4376			unsigned int frag_len = e1000_frag_len(adapter);
4377
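			/* The receive buffer sits E1000_HEADROOM bytes into
			 * the page fragment (see the allocation path), so
			 * build_skb() gets the fragment start and the
			 * headroom is then re-reserved so skb->data points
			 * at the received bytes.
			 */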
4378			skb = build_skb(data - E1000_HEADROOM, frag_len);
4379			if (!skb) {
4380				adapter->alloc_rx_buff_failed++;
4381				break;
4382			}
4383
4384			skb_reserve(skb, E1000_HEADROOM);
4385			dma_unmap_single(&pdev->dev, buffer_info->dma,
4386					 adapter->rx_buffer_len,
4387					 DMA_FROM_DEVICE);
4388			buffer_info->dma = 0;
4389			buffer_info->rxbuf.data = NULL;
4390		}
4391
4392		if (++i == rx_ring->count)
4393			i = 0;
4394
4395		next_rxd = E1000_RX_DESC(*rx_ring, i);
4396		prefetch(next_rxd);
4397
4398		next_buffer = &rx_ring->buffer_info[i];
4399
4400		cleaned = true;
4401		cleaned_count++;
4402
4403		/* !EOP means multiple descriptors were used to store a single
4404		 * packet, if that's the case we need to toss it.  In fact, we need
4405		 * to toss every packet with the EOP bit clear and the next
4406		 * frame that _does_ have the EOP bit set, as it is by
4407		 * definition only a frame fragment
4408		 */
4409		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4410			adapter->discarding = true;
4411
4412		if (adapter->discarding) {
4413			/* All receives must fit into a single buffer */
4414			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4415			dev_kfree_skb(skb);
4416			if (status & E1000_RXD_STAT_EOP)
4417				adapter->discarding = false;
4418			goto next_desc;
4419		}
4420
4421		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4422			if (e1000_tbi_should_accept(adapter, status,
4423						    rx_desc->errors,
4424						    length, data)) {
4425				length--;
4426			} else if (netdev->features & NETIF_F_RXALL) {
4427				goto process_skb;
4428			} else {
4429				dev_kfree_skb(skb);
4430				goto next_desc;
4431			}
4432		}
4433
4434process_skb:
4435		total_rx_bytes += (length - 4); /* don't count FCS */
4436		total_rx_packets++;
4437
4438		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4439			/* adjust length to remove Ethernet CRC, this must be
4440			 * done after the TBI_ACCEPT workaround above
4441			 */
4442			length -= 4;
4443
4444		if (buffer_info->rxbuf.data == NULL)
4445			skb_put(skb, length);
4446		else /* copybreak skb */
4447			skb_trim(skb, length);
4448
4449		/* Receive Checksum Offload */
4450		e1000_rx_checksum(adapter,
4451				  (u32)(status) |
4452				  ((u32)(rx_desc->errors) << 24),
4453				  le16_to_cpu(rx_desc->csum), skb);
4454
4455		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4456
4457next_desc:
4458		rx_desc->status = 0;
4459
4460		/* return some buffers to hardware, one at a time is too slow */
4461		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4462			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4463			cleaned_count = 0;
4464		}
4465
4466		/* use prefetched values */
4467		rx_desc = next_rxd;
4468		buffer_info = next_buffer;
4469	}
4470	rx_ring->next_to_clean = i;
4471
4472	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4473	if (cleaned_count)
4474		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4475
4476	adapter->total_rx_packets += total_rx_packets;
4477	adapter->total_rx_bytes += total_rx_bytes;
4478	netdev->stats.rx_bytes += total_rx_bytes;
4479	netdev->stats.rx_packets += total_rx_packets;
4480	return cleaned;
4481}
4482
4483/**
4484 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4485 * @adapter: address of board private structure
4486 * @rx_ring: pointer to receive ring structure
4487 * @cleaned_count: number of buffers to allocate this pass
4488 **/
4489static void
4490e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4491			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4492{
4493	struct pci_dev *pdev = adapter->pdev;
4494	struct e1000_rx_desc *rx_desc;
4495	struct e1000_rx_buffer *buffer_info;
4496	unsigned int i;
4497
4498	i = rx_ring->next_to_use;
4499	buffer_info = &rx_ring->buffer_info[i];
4500
4501	while (cleaned_count--) {
4502		/* allocate a new page if necessary */
4503		if (!buffer_info->rxbuf.page) {
4504			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4505			if (unlikely(!buffer_info->rxbuf.page)) {
4506				adapter->alloc_rx_buff_failed++;
4507				break;
4508			}
4509		}
4510
4511		if (!buffer_info->dma) {
4512			buffer_info->dma = dma_map_page(&pdev->dev,
4513							buffer_info->rxbuf.page, 0,
4514							adapter->rx_buffer_len,
4515							DMA_FROM_DEVICE);
4516			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4517				put_page(buffer_info->rxbuf.page);
4518				buffer_info->rxbuf.page = NULL;
4519				buffer_info->dma = 0;
4520				adapter->alloc_rx_buff_failed++;
4521				break;
4522			}
4523		}
4524
4525		rx_desc = E1000_RX_DESC(*rx_ring, i);
4526		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4527
4528		if (unlikely(++i == rx_ring->count))
4529			i = 0;
4530		buffer_info = &rx_ring->buffer_info[i];
4531	}
4532
4533	if (likely(rx_ring->next_to_use != i)) {
4534		rx_ring->next_to_use = i;
4535		if (unlikely(i-- == 0))
4536			i = (rx_ring->count - 1);
4537
4538		/* Force memory writes to complete before letting h/w
4539		 * know there are new descriptors to fetch.  (Only
4540		 * applicable for weak-ordered memory model archs,
4541		 * such as IA-64).
4542		 */
4543		dma_wmb();
4544		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4545	}
4546}
4547
4548/**
4549 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4550 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
4551 **/
4552static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4553				   struct e1000_rx_ring *rx_ring,
4554				   int cleaned_count)
4555{
4556	struct e1000_hw *hw = &adapter->hw;
4557	struct pci_dev *pdev = adapter->pdev;
4558	struct e1000_rx_desc *rx_desc;
4559	struct e1000_rx_buffer *buffer_info;
4560	unsigned int i;
4561	unsigned int bufsz = adapter->rx_buffer_len;
4562
4563	i = rx_ring->next_to_use;
4564	buffer_info = &rx_ring->buffer_info[i];
4565
4566	while (cleaned_count--) {
4567		void *data;
4568
4569		if (buffer_info->rxbuf.data)
4570			goto skip;
4571
4572		data = e1000_alloc_frag(adapter);
4573		if (!data) {
4574			/* Better luck next round */
4575			adapter->alloc_rx_buff_failed++;
4576			break;
4577		}
4578
4579		/* Fix for errata 23, can't cross 64kB boundary */
4580		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4581			void *olddata = data;
4582			e_err(rx_err, "skb align check failed: %u bytes at "
4583			      "%p\n", bufsz, data);
4584			/* Try again, without freeing the previous */
4585			data = e1000_alloc_frag(adapter);
4586			/* Failed allocation, critical failure */
4587			if (!data) {
4588				skb_free_frag(olddata);
4589				adapter->alloc_rx_buff_failed++;
4590				break;
4591			}
4592
4593			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4594				/* give up */
4595				skb_free_frag(data);
4596				skb_free_frag(olddata);
4597				adapter->alloc_rx_buff_failed++;
4598				break;
4599			}
4600
4601			/* Use new allocation */
4602			skb_free_frag(olddata);
4603		}
4604		buffer_info->dma = dma_map_single(&pdev->dev,
4605						  data,
4606						  adapter->rx_buffer_len,
4607						  DMA_FROM_DEVICE);
4608		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4609			skb_free_frag(data);
4610			buffer_info->dma = 0;
4611			adapter->alloc_rx_buff_failed++;
4612			break;
4613		}
4614
4615		/* XXX if it was allocated cleanly it will never map to a
4616		 * boundary crossing
4617		 */
4618
4619		/* Fix for errata 23, can't cross 64kB boundary */
4620		if (!e1000_check_64k_bound(adapter,
4621					(void *)(unsigned long)buffer_info->dma,
4622					adapter->rx_buffer_len)) {
4623			e_err(rx_err, "dma align check failed: %u bytes at "
4624			      "%p\n", adapter->rx_buffer_len,
4625			      (void *)(unsigned long)buffer_info->dma);
4626
4627			dma_unmap_single(&pdev->dev, buffer_info->dma,
4628					 adapter->rx_buffer_len,
4629					 DMA_FROM_DEVICE);
4630
4631			skb_free_frag(data);
4632			buffer_info->rxbuf.data = NULL;
4633			buffer_info->dma = 0;
4634
4635			adapter->alloc_rx_buff_failed++;
4636			break;
4637		}
4638		buffer_info->rxbuf.data = data;
4639 skip:
4640		rx_desc = E1000_RX_DESC(*rx_ring, i);
4641		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4642
4643		if (unlikely(++i == rx_ring->count))
4644			i = 0;
4645		buffer_info = &rx_ring->buffer_info[i];
4646	}
4647
4648	if (likely(rx_ring->next_to_use != i)) {
4649		rx_ring->next_to_use = i;
4650		if (unlikely(i-- == 0))
4651			i = (rx_ring->count - 1);
4652
4653		/* Force memory writes to complete before letting h/w
4654		 * know there are new descriptors to fetch.  (Only
4655		 * applicable for weak-ordered memory model archs,
4656		 * such as IA-64).
4657		 */
4658		dma_wmb();
4659		writel(i, hw->hw_addr + rx_ring->rdt);
4660	}
4661}
4662
4663/**
4664 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4665 * @adapter: board private structure
4666 **/
4667static void e1000_smartspeed(struct e1000_adapter *adapter)
4668{
4669	struct e1000_hw *hw = &adapter->hw;
4670	u16 phy_status;
4671	u16 phy_ctrl;
4672
4673	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4674	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4675		return;
4676
4677	if (adapter->smartspeed == 0) {
4678		/* If Master/Slave config fault is asserted twice,
4679		 * we assume back-to-back
4680		 */
4681		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4682		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4683			return;
4684		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4685		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4686			return;
4687		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4688		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4689			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4690			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4691					    phy_ctrl);
4692			adapter->smartspeed++;
4693			if (!e1000_phy_setup_autoneg(hw) &&
4694			   !e1000_read_phy_reg(hw, PHY_CTRL,
4695					       &phy_ctrl)) {
4696				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4697					     MII_CR_RESTART_AUTO_NEG);
4698				e1000_write_phy_reg(hw, PHY_CTRL,
4699						    phy_ctrl);
4700			}
4701		}
4702		return;
4703	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4704		/* If still no link, perhaps using 2/3 pair cable */
4705		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4706		phy_ctrl |= CR_1000T_MS_ENABLE;
4707		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4708		if (!e1000_phy_setup_autoneg(hw) &&
4709		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4710			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4711				     MII_CR_RESTART_AUTO_NEG);
4712			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4713		}
4714	}
4715	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4716	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4717		adapter->smartspeed = 0;
4718}
4719
4720/**
4721 * e1000_ioctl - handle ioctl calls
4722 * @netdev: pointer to our netdev
4723 * @ifr: pointer to interface request structure
4724 * @cmd: ioctl command to execute
4725 **/
4726static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4727{
4728	switch (cmd) {
4729	case SIOCGMIIPHY:
4730	case SIOCGMIIREG:
4731	case SIOCSMIIREG:
4732		return e1000_mii_ioctl(netdev, ifr, cmd);
4733	default:
4734		return -EOPNOTSUPP;
4735	}
4736}
4737
4738/**
4739 * e1000_mii_ioctl - handle MII ioctl requests
4740 * @netdev: pointer to our netdev
4741 * @ifr: pointer to interface request structure
4742 * @cmd: ioctl command to execute
4743 **/
4744static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4745			   int cmd)
4746{
4747	struct e1000_adapter *adapter = netdev_priv(netdev);
4748	struct e1000_hw *hw = &adapter->hw;
4749	struct mii_ioctl_data *data = if_mii(ifr);
4750	int retval;
4751	u16 mii_reg;
4752	unsigned long flags;
4753
4754	if (hw->media_type != e1000_media_type_copper)
4755		return -EOPNOTSUPP;
4756
4757	switch (cmd) {
4758	case SIOCGMIIPHY:
4759		data->phy_id = hw->phy_addr;
4760		break;
4761	case SIOCGMIIREG:
4762		spin_lock_irqsave(&adapter->stats_lock, flags);
4763		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4764				   &data->val_out)) {
4765			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4766			return -EIO;
4767		}
4768		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4769		break;
4770	case SIOCSMIIREG:
4771		if (data->reg_num & ~(0x1F))
4772			return -EFAULT;
4773		mii_reg = data->val_in;
4774		spin_lock_irqsave(&adapter->stats_lock, flags);
4775		if (e1000_write_phy_reg(hw, data->reg_num,
4776					mii_reg)) {
4777			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4778			return -EIO;
4779		}
4780		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4781		if (hw->media_type == e1000_media_type_copper) {
4782			switch (data->reg_num) {
4783			case PHY_CTRL:
4784				if (mii_reg & MII_CR_POWER_DOWN)
4785					break;
4786				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4787					hw->autoneg = 1;
4788					hw->autoneg_advertised = 0x2F;
4789				} else {
4790					u32 speed;
4791					if (mii_reg & 0x40)
4792						speed = SPEED_1000;
4793					else if (mii_reg & 0x2000)
4794						speed = SPEED_100;
4795					else
4796						speed = SPEED_10;
4797					retval = e1000_set_spd_dplx(
4798						adapter, speed,
4799						((mii_reg & 0x100)
4800						 ? DUPLEX_FULL :
4801						 DUPLEX_HALF));
4802					if (retval)
4803						return retval;
4804				}
4805				if (netif_running(adapter->netdev))
4806					e1000_reinit_locked(adapter);
4807				else
4808					e1000_reset(adapter);
4809				break;
4810			case M88E1000_PHY_SPEC_CTRL:
4811			case M88E1000_EXT_PHY_SPEC_CTRL:
4812				if (e1000_phy_reset(hw))
4813					return -EIO;
4814				break;
4815			}
4816		} else {
4817			switch (data->reg_num) {
4818			case PHY_CTRL:
4819				if (mii_reg & MII_CR_POWER_DOWN)
4820					break;
4821				if (netif_running(adapter->netdev))
4822					e1000_reinit_locked(adapter);
4823				else
4824					e1000_reset(adapter);
4825				break;
4826			}
4827		}
4828		break;
4829	default:
4830		return -EOPNOTSUPP;
4831	}
4832	return E1000_SUCCESS;
4833}
4834
4835void e1000_pci_set_mwi(struct e1000_hw *hw)
4836{
4837	struct e1000_adapter *adapter = hw->back;
4838	int ret_val = pci_set_mwi(adapter->pdev);
4839
4840	if (ret_val)
4841		e_err(probe, "Error in setting MWI\n");
4842}
4843
4844void e1000_pci_clear_mwi(struct e1000_hw *hw)
4845{
4846	struct e1000_adapter *adapter = hw->back;
4847
4848	pci_clear_mwi(adapter->pdev);
4849}
4850
4851int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4852{
4853	struct e1000_adapter *adapter = hw->back;
4854	return pcix_get_mmrbc(adapter->pdev);
4855}
4856
4857void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4858{
4859	struct e1000_adapter *adapter = hw->back;
4860	pcix_set_mmrbc(adapter->pdev, mmrbc);
4861}
4862
4863void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4864{
4865	outl(value, port);
4866}
4867
4868static bool e1000_vlan_used(struct e1000_adapter *adapter)
4869{
4870	u16 vid;
4871
4872	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4873		return true;
4874	return false;
4875}
4876
4877static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4878			      netdev_features_t features)
4879{
4880	struct e1000_hw *hw = &adapter->hw;
4881	u32 ctrl;
4882
4883	ctrl = er32(CTRL);
4884	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4885		/* enable VLAN tag insert/strip */
4886		ctrl |= E1000_CTRL_VME;
4887	} else {
4888		/* disable VLAN tag insert/strip */
4889		ctrl &= ~E1000_CTRL_VME;
4890	}
4891	ew32(CTRL, ctrl);
4892	}
4893
4893static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4894				     bool filter_on)
4895{
4896	struct e1000_hw *hw = &adapter->hw;
4897	u32 rctl;
4898
4899	if (!test_bit(__E1000_DOWN, &adapter->flags))
4900		e1000_irq_disable(adapter);
4901
4902	__e1000_vlan_mode(adapter, adapter->netdev->features);
4903	if (filter_on) {
4904		/* enable VLAN receive filtering */
4905		rctl = er32(RCTL);
4906		rctl &= ~E1000_RCTL_CFIEN;
4907		if (!(adapter->netdev->flags & IFF_PROMISC))
4908			rctl |= E1000_RCTL_VFE;
4909		ew32(RCTL, rctl);
4910		e1000_update_mng_vlan(adapter);
4911	} else {
4912		/* disable VLAN receive filtering */
4913		rctl = er32(RCTL);
4914		rctl &= ~E1000_RCTL_VFE;
4915		ew32(RCTL, rctl);
4916	}
4917
4918	if (!test_bit(__E1000_DOWN, &adapter->flags))
4919		e1000_irq_enable(adapter);
4920}
4921
4922static void e1000_vlan_mode(struct net_device *netdev,
4923			    netdev_features_t features)
4924{
4925	struct e1000_adapter *adapter = netdev_priv(netdev);
4926
4927	if (!test_bit(__E1000_DOWN, &adapter->flags))
4928		e1000_irq_disable(adapter);
4929
4930	__e1000_vlan_mode(adapter, features);
4931
4932	if (!test_bit(__E1000_DOWN, &adapter->flags))
4933		e1000_irq_enable(adapter);
4934}
4935
4936static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4937				 __be16 proto, u16 vid)
4938{
4939	struct e1000_adapter *adapter = netdev_priv(netdev);
4940	struct e1000_hw *hw = &adapter->hw;
4941	u32 vfta, index;
4942
4943	if ((hw->mng_cookie.status &
4944	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4945	    (vid == adapter->mng_vlan_id))
4946		return 0;
4947
4948	if (!e1000_vlan_used(adapter))
4949		e1000_vlan_filter_on_off(adapter, true);
4950
4951	/* add VID to filter table */
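	/* e.g. (illustrative) vid 100: index = (100 >> 5) & 0x7F = 3,
	 * bit = 100 & 0x1F = 4, so bit 4 of VFTA register 3 gets set
	 */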
4952	index = (vid >> 5) & 0x7F;
4953	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4954	vfta |= (1 << (vid & 0x1F));
4955	e1000_write_vfta(hw, index, vfta);
4956
4957	set_bit(vid, adapter->active_vlans);
4958
4959	return 0;
4960}
4961
4962static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4963				  __be16 proto, u16 vid)
4964{
4965	struct e1000_adapter *adapter = netdev_priv(netdev);
4966	struct e1000_hw *hw = &adapter->hw;
4967	u32 vfta, index;
4968
4969	if (!test_bit(__E1000_DOWN, &adapter->flags))
4970		e1000_irq_disable(adapter);
4971	if (!test_bit(__E1000_DOWN, &adapter->flags))
4972		e1000_irq_enable(adapter);
4973
4974	/* remove VID from filter table */
4975	index = (vid >> 5) & 0x7F;
4976	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4977	vfta &= ~(1 << (vid & 0x1F));
4978	e1000_write_vfta(hw, index, vfta);
4979
4980	clear_bit(vid, adapter->active_vlans);
4981
4982	if (!e1000_vlan_used(adapter))
4983		e1000_vlan_filter_on_off(adapter, false);
4984
4985	return 0;
4986}
4987
4988static void e1000_restore_vlan(struct e1000_adapter *adapter)
4989{
4990	u16 vid;
4991
4992	if (!e1000_vlan_used(adapter))
4993		return;
4994
4995	e1000_vlan_filter_on_off(adapter, true);
4996	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4997		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4998}
4999
5000int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5001{
5002	struct e1000_hw *hw = &adapter->hw;
5003
5004	hw->autoneg = 0;
5005
5006	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5007	 * for the switch() below to work
5008	 */
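	/* e.g. SPEED_100 (100) + DUPLEX_FULL (1) sums to 101; with dplx
	 * forced to 0/1 and the speed lsb forced clear, no two valid
	 * speed/duplex pairs can produce the same sum in the switch below
	 */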
5009	if ((spd & 1) || (dplx & ~1))
5010		goto err_inval;
5011
5012	/* Fiber NICs only allow 1000 Mbps full duplex */
5013	if ((hw->media_type == e1000_media_type_fiber) &&
5014	    (spd != SPEED_1000 ||
5015	     dplx != DUPLEX_FULL))
5016		goto err_inval;
5017
5018	switch (spd + dplx) {
5019	case SPEED_10 + DUPLEX_HALF:
5020		hw->forced_speed_duplex = e1000_10_half;
5021		break;
5022	case SPEED_10 + DUPLEX_FULL:
5023		hw->forced_speed_duplex = e1000_10_full;
5024		break;
5025	case SPEED_100 + DUPLEX_HALF:
5026		hw->forced_speed_duplex = e1000_100_half;
5027		break;
5028	case SPEED_100 + DUPLEX_FULL:
5029		hw->forced_speed_duplex = e1000_100_full;
5030		break;
5031	case SPEED_1000 + DUPLEX_FULL:
5032		hw->autoneg = 1;
5033		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5034		break;
5035	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5036	default:
5037		goto err_inval;
5038	}
5039
5040	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5041	hw->mdix = AUTO_ALL_MODES;
5042
5043	return 0;
5044
5045err_inval:
5046	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5047	return -EINVAL;
5048}
5049
5050static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5051{
5052	struct net_device *netdev = pci_get_drvdata(pdev);
5053	struct e1000_adapter *adapter = netdev_priv(netdev);
5054	struct e1000_hw *hw = &adapter->hw;
5055	u32 ctrl, ctrl_ext, rctl, status;
5056	u32 wufc = adapter->wol;
5057
5058	netif_device_detach(netdev);
5059
5060	if (netif_running(netdev)) {
5061		int count = E1000_CHECK_RESET_COUNT;
5062
5063		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5064			usleep_range(10000, 20000);
5065
5066		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5067		e1000_down(adapter);
5068	}
5069
5070	status = er32(STATUS);
5071	if (status & E1000_STATUS_LU)
5072		wufc &= ~E1000_WUFC_LNKC;
5073
5074	if (wufc) {
5075		e1000_setup_rctl(adapter);
5076		e1000_set_rx_mode(netdev);
5077
5078		rctl = er32(RCTL);
5079
5080		/* turn on all-multi mode if wake on multicast is enabled */
5081		if (wufc & E1000_WUFC_MC)
5082			rctl |= E1000_RCTL_MPE;
5083
5084		/* enable receives in the hardware */
5085		ew32(RCTL, rctl | E1000_RCTL_EN);
5086
5087		if (hw->mac_type >= e1000_82540) {
5088			ctrl = er32(CTRL);
5089			/* advertise wake from D3Cold */
5090			#define E1000_CTRL_ADVD3WUC 0x00100000
5091			/* phy power management enable */
5092			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5093			ctrl |= E1000_CTRL_ADVD3WUC |
5094				E1000_CTRL_EN_PHY_PWR_MGMT;
5095			ew32(CTRL, ctrl);
5096		}
5097
5098		if (hw->media_type == e1000_media_type_fiber ||
5099		    hw->media_type == e1000_media_type_internal_serdes) {
5100			/* keep the laser running in D3 */
5101			ctrl_ext = er32(CTRL_EXT);
5102			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5103			ew32(CTRL_EXT, ctrl_ext);
5104		}
5105
5106		ew32(WUC, E1000_WUC_PME_EN);
5107		ew32(WUFC, wufc);
5108	} else {
5109		ew32(WUC, 0);
5110		ew32(WUFC, 0);
5111	}
5112
5113	e1000_release_manageability(adapter);
5114
5115	*enable_wake = !!wufc;
5116
5117	/* make sure adapter isn't asleep if manageability is enabled */
5118	if (adapter->en_mng_pt)
5119		*enable_wake = true;
5120
5121	if (netif_running(netdev))
5122		e1000_free_irq(adapter);
5123
5124	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5125		pci_disable_device(pdev);
5126
5127	return 0;
5128}
5129
5130static int __maybe_unused e1000_suspend(struct device *dev)
5131{
5132	int retval;
5133	struct pci_dev *pdev = to_pci_dev(dev);
5134	bool wake;
5135
5136	retval = __e1000_shutdown(pdev, &wake);
5137	device_set_wakeup_enable(dev, wake);
5138
5139	return retval;
5140}
5141
5142static int __maybe_unused e1000_resume(struct device *dev)
5143{
5144	struct pci_dev *pdev = to_pci_dev(dev);
5145	struct net_device *netdev = pci_get_drvdata(pdev);
5146	struct e1000_adapter *adapter = netdev_priv(netdev);
5147	struct e1000_hw *hw = &adapter->hw;
5148	int err;
5149
5150	if (adapter->need_ioport)
5151		err = pci_enable_device(pdev);
5152	else
5153		err = pci_enable_device_mem(pdev);
5154	if (err) {
5155		pr_err("Cannot enable PCI device from suspend\n");
5156		return err;
5157	}
5158
5159	/* flush memory to make sure state is correct */
5160	smp_mb__before_atomic();
5161	clear_bit(__E1000_DISABLED, &adapter->flags);
5162	pci_set_master(pdev);
5163
5164	pci_enable_wake(pdev, PCI_D3hot, 0);
5165	pci_enable_wake(pdev, PCI_D3cold, 0);
5166
5167	if (netif_running(netdev)) {
5168		err = e1000_request_irq(adapter);
5169		if (err)
5170			return err;
5171	}
5172
5173	e1000_power_up_phy(adapter);
5174	e1000_reset(adapter);
5175	ew32(WUS, ~0);
5176
5177	e1000_init_manageability(adapter);
5178
5179	if (netif_running(netdev))
5180		e1000_up(adapter);
5181
5182	netif_device_attach(netdev);
5183
5184	return 0;
5185}
5186
5187static void e1000_shutdown(struct pci_dev *pdev)
5188{
5189	bool wake;
5190
5191	__e1000_shutdown(pdev, &wake);
5192
5193	if (system_state == SYSTEM_POWER_OFF) {
5194		pci_wake_from_d3(pdev, wake);
5195		pci_set_power_state(pdev, PCI_D3hot);
5196	}
5197}
5198
5199#ifdef CONFIG_NET_POLL_CONTROLLER
5200/* Polling 'interrupt' - used by things like netconsole to send skbs
5201 * without having to re-enable interrupts. It's not called while
5202 * the interrupt routine is executing.
5203 */
5204static void e1000_netpoll(struct net_device *netdev)
5205{
5206	struct e1000_adapter *adapter = netdev_priv(netdev);
5207
5208	if (disable_hardirq(adapter->pdev->irq))
5209		e1000_intr(adapter->pdev->irq, netdev);
5210	enable_irq(adapter->pdev->irq);
5211}
5212#endif
5213
5214/**
5215 * e1000_io_error_detected - called when PCI error is detected
5216 * @pdev: Pointer to PCI device
5217 * @state: The current pci connection state
5218 *
5219 * This function is called after a PCI bus error affecting
5220 * this device has been detected.
5221 */
5222static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5223						pci_channel_state_t state)
5224{
5225	struct net_device *netdev = pci_get_drvdata(pdev);
5226	struct e1000_adapter *adapter = netdev_priv(netdev);
5227
5228	netif_device_detach(netdev);
5229
5230	if (state == pci_channel_io_perm_failure)
5231		return PCI_ERS_RESULT_DISCONNECT;
5232
5233	if (netif_running(netdev))
5234		e1000_down(adapter);
5235
5236	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5237		pci_disable_device(pdev);
5238
5239	/* Request a slot reset. */
5240	return PCI_ERS_RESULT_NEED_RESET;
5241}
5242
5243/**
5244 * e1000_io_slot_reset - called after the pci bus has been reset.
5245 * @pdev: Pointer to PCI device
5246 *
5247 * Restart the card from scratch, as if from a cold-boot. Implementation
5248 * resembles the first-half of the e1000_resume routine.
5249 */
5250static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5251{
5252	struct net_device *netdev = pci_get_drvdata(pdev);
5253	struct e1000_adapter *adapter = netdev_priv(netdev);
5254	struct e1000_hw *hw = &adapter->hw;
5255	int err;
5256
5257	if (adapter->need_ioport)
5258		err = pci_enable_device(pdev);
5259	else
5260		err = pci_enable_device_mem(pdev);
5261	if (err) {
5262		pr_err("Cannot re-enable PCI device after reset.\n");
5263		return PCI_ERS_RESULT_DISCONNECT;
5264	}
5265
5266	/* flush memory to make sure state is correct */
5267	smp_mb__before_atomic();
5268	clear_bit(__E1000_DISABLED, &adapter->flags);
5269	pci_set_master(pdev);
5270
5271	pci_enable_wake(pdev, PCI_D3hot, 0);
5272	pci_enable_wake(pdev, PCI_D3cold, 0);
5273
5274	e1000_reset(adapter);
5275	ew32(WUS, ~0);
5276
5277	return PCI_ERS_RESULT_RECOVERED;
5278}
5279
5280/**
5281 * e1000_io_resume - called when traffic can start flowing again.
5282 * @pdev: Pointer to PCI device
5283 *
5284 * This callback is called when the error recovery driver tells us that
5285 * its OK to resume normal operation. Implementation resembles the
5286 * second-half of the e1000_resume routine.
5287 */
5288static void e1000_io_resume(struct pci_dev *pdev)
5289{
5290	struct net_device *netdev = pci_get_drvdata(pdev);
5291	struct e1000_adapter *adapter = netdev_priv(netdev);
5292
5293	e1000_init_manageability(adapter);
5294
5295	if (netif_running(netdev)) {
5296		if (e1000_up(adapter)) {
5297			pr_info("can't bring device back up after reset\n");
5298			return;
5299		}
5300	}
5301
5302	netif_device_attach(netdev);
5303}
5304
5305/* e1000_main.c */