Linux Audio

Check our new training course

Embedded Linux training

Mar 10-20, 2025, special US time zones
Register
Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 1999 - 2006 Intel Corporation. */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
   3
   4#include "e1000.h"
   5#include <net/ip6_checksum.h>
   6#include <linux/io.h>
   7#include <linux/prefetch.h>
   8#include <linux/bitops.h>
   9#include <linux/if_vlan.h>
  10
  11char e1000_driver_name[] = "e1000";
  12static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 
 
  13static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  14
  15/* e1000_pci_tbl - PCI Device ID Table
  16 *
  17 * Last entry must be all 0s
  18 *
  19 * Macro expands to...
  20 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  21 */
  22static const struct pci_device_id e1000_pci_tbl[] = {
  23	INTEL_E1000_ETHERNET_DEVICE(0x1000),
  24	INTEL_E1000_ETHERNET_DEVICE(0x1001),
  25	INTEL_E1000_ETHERNET_DEVICE(0x1004),
  26	INTEL_E1000_ETHERNET_DEVICE(0x1008),
  27	INTEL_E1000_ETHERNET_DEVICE(0x1009),
  28	INTEL_E1000_ETHERNET_DEVICE(0x100C),
  29	INTEL_E1000_ETHERNET_DEVICE(0x100D),
  30	INTEL_E1000_ETHERNET_DEVICE(0x100E),
  31	INTEL_E1000_ETHERNET_DEVICE(0x100F),
  32	INTEL_E1000_ETHERNET_DEVICE(0x1010),
  33	INTEL_E1000_ETHERNET_DEVICE(0x1011),
  34	INTEL_E1000_ETHERNET_DEVICE(0x1012),
  35	INTEL_E1000_ETHERNET_DEVICE(0x1013),
  36	INTEL_E1000_ETHERNET_DEVICE(0x1014),
  37	INTEL_E1000_ETHERNET_DEVICE(0x1015),
  38	INTEL_E1000_ETHERNET_DEVICE(0x1016),
  39	INTEL_E1000_ETHERNET_DEVICE(0x1017),
  40	INTEL_E1000_ETHERNET_DEVICE(0x1018),
  41	INTEL_E1000_ETHERNET_DEVICE(0x1019),
  42	INTEL_E1000_ETHERNET_DEVICE(0x101A),
  43	INTEL_E1000_ETHERNET_DEVICE(0x101D),
  44	INTEL_E1000_ETHERNET_DEVICE(0x101E),
  45	INTEL_E1000_ETHERNET_DEVICE(0x1026),
  46	INTEL_E1000_ETHERNET_DEVICE(0x1027),
  47	INTEL_E1000_ETHERNET_DEVICE(0x1028),
  48	INTEL_E1000_ETHERNET_DEVICE(0x1075),
  49	INTEL_E1000_ETHERNET_DEVICE(0x1076),
  50	INTEL_E1000_ETHERNET_DEVICE(0x1077),
  51	INTEL_E1000_ETHERNET_DEVICE(0x1078),
  52	INTEL_E1000_ETHERNET_DEVICE(0x1079),
  53	INTEL_E1000_ETHERNET_DEVICE(0x107A),
  54	INTEL_E1000_ETHERNET_DEVICE(0x107B),
  55	INTEL_E1000_ETHERNET_DEVICE(0x107C),
  56	INTEL_E1000_ETHERNET_DEVICE(0x108A),
  57	INTEL_E1000_ETHERNET_DEVICE(0x1099),
  58	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  59	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  60	/* required last entry */
  61	{0,}
  62};
  63
  64MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  65
  66int e1000_up(struct e1000_adapter *adapter);
  67void e1000_down(struct e1000_adapter *adapter);
  68void e1000_reinit_locked(struct e1000_adapter *adapter);
  69void e1000_reset(struct e1000_adapter *adapter);
  70int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  71int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  72void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
  73void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
  74static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
  75				    struct e1000_tx_ring *txdr);
  76static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
  77				    struct e1000_rx_ring *rxdr);
  78static void e1000_free_tx_resources(struct e1000_adapter *adapter,
  79				    struct e1000_tx_ring *tx_ring);
  80static void e1000_free_rx_resources(struct e1000_adapter *adapter,
  81				    struct e1000_rx_ring *rx_ring);
  82void e1000_update_stats(struct e1000_adapter *adapter);
  83
  84static int e1000_init_module(void);
  85static void e1000_exit_module(void);
  86static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  87static void e1000_remove(struct pci_dev *pdev);
  88static int e1000_alloc_queues(struct e1000_adapter *adapter);
  89static int e1000_sw_init(struct e1000_adapter *adapter);
  90int e1000_open(struct net_device *netdev);
  91int e1000_close(struct net_device *netdev);
  92static void e1000_configure_tx(struct e1000_adapter *adapter);
  93static void e1000_configure_rx(struct e1000_adapter *adapter);
  94static void e1000_setup_rctl(struct e1000_adapter *adapter);
  95static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
  96static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
  97static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
  98				struct e1000_tx_ring *tx_ring);
  99static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 100				struct e1000_rx_ring *rx_ring);
 101static void e1000_set_rx_mode(struct net_device *netdev);
 102static void e1000_update_phy_info_task(struct work_struct *work);
 103static void e1000_watchdog(struct work_struct *work);
 104static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 105static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 106				    struct net_device *netdev);
 107static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 108static int e1000_set_mac(struct net_device *netdev, void *p);
 109static irqreturn_t e1000_intr(int irq, void *data);
 110static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 111			       struct e1000_tx_ring *tx_ring);
 112static int e1000_clean(struct napi_struct *napi, int budget);
 113static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 114			       struct e1000_rx_ring *rx_ring,
 115			       int *work_done, int work_to_do);
 116static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 117				     struct e1000_rx_ring *rx_ring,
 118				     int *work_done, int work_to_do);
 119static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 120					 struct e1000_rx_ring *rx_ring,
 121					 int cleaned_count)
 122{
 123}
 124static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 125				   struct e1000_rx_ring *rx_ring,
 126				   int cleaned_count);
 127static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 128					 struct e1000_rx_ring *rx_ring,
 129					 int cleaned_count);
 130static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 131static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 132			   int cmd);
 133static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 134static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 135static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
 136static void e1000_reset_task(struct work_struct *work);
 137static void e1000_smartspeed(struct e1000_adapter *adapter);
 138static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 139				       struct sk_buff *skb);
 140
 141static bool e1000_vlan_used(struct e1000_adapter *adapter);
 142static void e1000_vlan_mode(struct net_device *netdev,
 143			    netdev_features_t features);
 144static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 145				     bool filter_on);
 146static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 147				 __be16 proto, u16 vid);
 148static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 149				  __be16 proto, u16 vid);
 150static void e1000_restore_vlan(struct e1000_adapter *adapter);
 151
 152static int e1000_suspend(struct device *dev);
 153static int e1000_resume(struct device *dev);
 
 
 154static void e1000_shutdown(struct pci_dev *pdev);
 155
 156#ifdef CONFIG_NET_POLL_CONTROLLER
 157/* for netdump / net console */
 158static void e1000_netpoll (struct net_device *netdev);
 159#endif
 160
 161#define COPYBREAK_DEFAULT 256
 162static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 163module_param(copybreak, uint, 0644);
 164MODULE_PARM_DESC(copybreak,
 165	"Maximum size of packet that is copied to a new buffer on receive");
 166
 167static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 168						pci_channel_state_t state);
 169static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 170static void e1000_io_resume(struct pci_dev *pdev);
 171
 172static const struct pci_error_handlers e1000_err_handler = {
 173	.error_detected = e1000_io_error_detected,
 174	.slot_reset = e1000_io_slot_reset,
 175	.resume = e1000_io_resume,
 176};
 177
 178static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
 179
 180static struct pci_driver e1000_driver = {
 181	.name     = e1000_driver_name,
 182	.id_table = e1000_pci_tbl,
 183	.probe    = e1000_probe,
 184	.remove   = e1000_remove,
 185	.driver.pm = pm_sleep_ptr(&e1000_pm_ops),
 
 
 
 
 186	.shutdown = e1000_shutdown,
 187	.err_handler = &e1000_err_handler
 188};
 189
 
 190MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 191MODULE_LICENSE("GPL v2");
 
 192
 193#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 194static int debug = -1;
 195module_param(debug, int, 0);
 196MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 197
 198/**
 199 * e1000_get_hw_dev - helper function for getting netdev
 200 * @hw: pointer to HW struct
 201 *
 202 * return device used by hardware layer to print debugging information
 203 *
 204 **/
 205struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 206{
 207	struct e1000_adapter *adapter = hw->back;
 208	return adapter->netdev;
 209}
 210
 211/**
 212 * e1000_init_module - Driver Registration Routine
 213 *
 214 * e1000_init_module is the first routine called when the driver is
 215 * loaded. All it does is register with the PCI subsystem.
 216 **/
 217static int __init e1000_init_module(void)
 218{
 219	int ret;
 220	pr_info("%s\n", e1000_driver_string);
 221
 222	pr_info("%s\n", e1000_copyright);
 223
 224	ret = pci_register_driver(&e1000_driver);
 225	if (copybreak != COPYBREAK_DEFAULT) {
 226		if (copybreak == 0)
 227			pr_info("copybreak disabled\n");
 228		else
 229			pr_info("copybreak enabled for "
 230				   "packets <= %u bytes\n", copybreak);
 231	}
 232	return ret;
 233}
 234
 235module_init(e1000_init_module);
 236
 237/**
 238 * e1000_exit_module - Driver Exit Cleanup Routine
 239 *
 240 * e1000_exit_module is called just before the driver is removed
 241 * from memory.
 242 **/
 243static void __exit e1000_exit_module(void)
 244{
 245	pci_unregister_driver(&e1000_driver);
 246}
 247
 248module_exit(e1000_exit_module);
 249
 250static int e1000_request_irq(struct e1000_adapter *adapter)
 251{
 252	struct net_device *netdev = adapter->netdev;
 253	irq_handler_t handler = e1000_intr;
 254	int irq_flags = IRQF_SHARED;
 255	int err;
 256
 257	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 258			  netdev);
 259	if (err) {
 260		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 261	}
 262
 263	return err;
 264}
 265
 266static void e1000_free_irq(struct e1000_adapter *adapter)
 267{
 268	struct net_device *netdev = adapter->netdev;
 269
 270	free_irq(adapter->pdev->irq, netdev);
 271}
 272
 273/**
 274 * e1000_irq_disable - Mask off interrupt generation on the NIC
 275 * @adapter: board private structure
 276 **/
 277static void e1000_irq_disable(struct e1000_adapter *adapter)
 278{
 279	struct e1000_hw *hw = &adapter->hw;
 280
 281	ew32(IMC, ~0);
 282	E1000_WRITE_FLUSH();
 283	synchronize_irq(adapter->pdev->irq);
 284}
 285
 286/**
 287 * e1000_irq_enable - Enable default interrupt generation settings
 288 * @adapter: board private structure
 289 **/
 290static void e1000_irq_enable(struct e1000_adapter *adapter)
 291{
 292	struct e1000_hw *hw = &adapter->hw;
 293
 294	ew32(IMS, IMS_ENABLE_MASK);
 295	E1000_WRITE_FLUSH();
 296}
 297
 298static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 299{
 300	struct e1000_hw *hw = &adapter->hw;
 301	struct net_device *netdev = adapter->netdev;
 302	u16 vid = hw->mng_cookie.vlan_id;
 303	u16 old_vid = adapter->mng_vlan_id;
 304
 305	if (!e1000_vlan_used(adapter))
 306		return;
 307
 308	if (!test_bit(vid, adapter->active_vlans)) {
 309		if (hw->mng_cookie.status &
 310		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 311			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 312			adapter->mng_vlan_id = vid;
 313		} else {
 314			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 315		}
 316		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 317		    (vid != old_vid) &&
 318		    !test_bit(old_vid, adapter->active_vlans))
 319			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 320					       old_vid);
 321	} else {
 322		adapter->mng_vlan_id = vid;
 323	}
 324}
 325
 326static void e1000_init_manageability(struct e1000_adapter *adapter)
 327{
 328	struct e1000_hw *hw = &adapter->hw;
 329
 330	if (adapter->en_mng_pt) {
 331		u32 manc = er32(MANC);
 332
 333		/* disable hardware interception of ARP */
 334		manc &= ~(E1000_MANC_ARP_EN);
 335
 336		ew32(MANC, manc);
 337	}
 338}
 339
 340static void e1000_release_manageability(struct e1000_adapter *adapter)
 341{
 342	struct e1000_hw *hw = &adapter->hw;
 343
 344	if (adapter->en_mng_pt) {
 345		u32 manc = er32(MANC);
 346
 347		/* re-enable hardware interception of ARP */
 348		manc |= E1000_MANC_ARP_EN;
 349
 350		ew32(MANC, manc);
 351	}
 352}
 353
 354/**
 355 * e1000_configure - configure the hardware for RX and TX
 356 * @adapter: private board structure
 357 **/
 358static void e1000_configure(struct e1000_adapter *adapter)
 359{
 360	struct net_device *netdev = adapter->netdev;
 361	int i;
 362
 363	e1000_set_rx_mode(netdev);
 364
 365	e1000_restore_vlan(adapter);
 366	e1000_init_manageability(adapter);
 367
 368	e1000_configure_tx(adapter);
 369	e1000_setup_rctl(adapter);
 370	e1000_configure_rx(adapter);
 371	/* call E1000_DESC_UNUSED which always leaves
 372	 * at least 1 descriptor unused to make sure
 373	 * next_to_use != next_to_clean
 374	 */
 375	for (i = 0; i < adapter->num_rx_queues; i++) {
 376		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 377		adapter->alloc_rx_buf(adapter, ring,
 378				      E1000_DESC_UNUSED(ring));
 379	}
 380}
 381
 382int e1000_up(struct e1000_adapter *adapter)
 383{
 384	struct e1000_hw *hw = &adapter->hw;
 385
 386	/* hardware has been reset, we need to reload some things */
 387	e1000_configure(adapter);
 388
 389	clear_bit(__E1000_DOWN, &adapter->flags);
 390
 391	napi_enable(&adapter->napi);
 392
 393	e1000_irq_enable(adapter);
 394
 395	netif_wake_queue(adapter->netdev);
 396
 397	/* fire a link change interrupt to start the watchdog */
 398	ew32(ICS, E1000_ICS_LSC);
 399	return 0;
 400}
 401
 402/**
 403 * e1000_power_up_phy - restore link in case the phy was powered down
 404 * @adapter: address of board private structure
 405 *
 406 * The phy may be powered down to save power and turn off link when the
 407 * driver is unloaded and wake on lan is not enabled (among others)
 408 * *** this routine MUST be followed by a call to e1000_reset ***
 409 **/
 410void e1000_power_up_phy(struct e1000_adapter *adapter)
 411{
 412	struct e1000_hw *hw = &adapter->hw;
 413	u16 mii_reg = 0;
 414
 415	/* Just clear the power down bit to wake the phy back up */
 416	if (hw->media_type == e1000_media_type_copper) {
 417		/* according to the manual, the phy will retain its
 418		 * settings across a power-down/up cycle
 419		 */
 420		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 421		mii_reg &= ~MII_CR_POWER_DOWN;
 422		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 423	}
 424}
 425
 426static void e1000_power_down_phy(struct e1000_adapter *adapter)
 427{
 428	struct e1000_hw *hw = &adapter->hw;
 429
 430	/* Power down the PHY so no link is implied when interface is down *
 431	 * The PHY cannot be powered down if any of the following is true *
 432	 * (a) WoL is enabled
 433	 * (b) AMT is active
 434	 * (c) SoL/IDER session is active
 435	 */
 436	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 437	   hw->media_type == e1000_media_type_copper) {
 438		u16 mii_reg = 0;
 439
 440		switch (hw->mac_type) {
 441		case e1000_82540:
 442		case e1000_82545:
 443		case e1000_82545_rev_3:
 444		case e1000_82546:
 445		case e1000_ce4100:
 446		case e1000_82546_rev_3:
 447		case e1000_82541:
 448		case e1000_82541_rev_2:
 449		case e1000_82547:
 450		case e1000_82547_rev_2:
 451			if (er32(MANC) & E1000_MANC_SMBUS_EN)
 452				goto out;
 453			break;
 454		default:
 455			goto out;
 456		}
 457		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 458		mii_reg |= MII_CR_POWER_DOWN;
 459		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 460		msleep(1);
 461	}
 462out:
 463	return;
 464}
 465
 466static void e1000_down_and_stop(struct e1000_adapter *adapter)
 467{
 468	set_bit(__E1000_DOWN, &adapter->flags);
 469
 470	cancel_delayed_work_sync(&adapter->watchdog_task);
 471
 472	/*
 473	 * Since the watchdog task can reschedule other tasks, we should cancel
 474	 * it first, otherwise we can run into the situation when a work is
 475	 * still running after the adapter has been turned down.
 476	 */
 477
 478	cancel_delayed_work_sync(&adapter->phy_info_task);
 479	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 480
 481	/* Only kill reset task if adapter is not resetting */
 482	if (!test_bit(__E1000_RESETTING, &adapter->flags))
 483		cancel_work_sync(&adapter->reset_task);
 484}
 485
 486void e1000_down(struct e1000_adapter *adapter)
 487{
 488	struct e1000_hw *hw = &adapter->hw;
 489	struct net_device *netdev = adapter->netdev;
 490	u32 rctl, tctl;
 491
 492	/* disable receives in the hardware */
 493	rctl = er32(RCTL);
 494	ew32(RCTL, rctl & ~E1000_RCTL_EN);
 495	/* flush and sleep below */
 496
 497	netif_tx_disable(netdev);
 498
 499	/* disable transmits in the hardware */
 500	tctl = er32(TCTL);
 501	tctl &= ~E1000_TCTL_EN;
 502	ew32(TCTL, tctl);
 503	/* flush both disables and wait for them to finish */
 504	E1000_WRITE_FLUSH();
 505	msleep(10);
 506
 507	/* Set the carrier off after transmits have been disabled in the
 508	 * hardware, to avoid race conditions with e1000_watchdog() (which
 509	 * may be running concurrently to us, checking for the carrier
 510	 * bit to decide whether it should enable transmits again). Such
 511	 * a race condition would result into transmission being disabled
 512	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 513	 */
 514	netif_carrier_off(netdev);
 515
 516	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, NULL);
 517	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, NULL);
 518	napi_disable(&adapter->napi);
 519
 520	e1000_irq_disable(adapter);
 521
 522	/* Setting DOWN must be after irq_disable to prevent
 523	 * a screaming interrupt.  Setting DOWN also prevents
 524	 * tasks from rescheduling.
 525	 */
 526	e1000_down_and_stop(adapter);
 527
 528	adapter->link_speed = 0;
 529	adapter->link_duplex = 0;
 530
 531	e1000_reset(adapter);
 532	e1000_clean_all_tx_rings(adapter);
 533	e1000_clean_all_rx_rings(adapter);
 534}
 535
 536void e1000_reinit_locked(struct e1000_adapter *adapter)
 537{
 
 538	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 539		msleep(1);
 540
 541	/* only run the task if not already down */
 542	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
 543		e1000_down(adapter);
 544		e1000_up(adapter);
 545	}
 546
 547	clear_bit(__E1000_RESETTING, &adapter->flags);
 548}
 549
 550void e1000_reset(struct e1000_adapter *adapter)
 551{
 552	struct e1000_hw *hw = &adapter->hw;
 553	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 554	bool legacy_pba_adjust = false;
 555	u16 hwm;
 556
 557	/* Repartition Pba for greater than 9k mtu
 558	 * To take effect CTRL.RST is required.
 559	 */
 560
 561	switch (hw->mac_type) {
 562	case e1000_82542_rev2_0:
 563	case e1000_82542_rev2_1:
 564	case e1000_82543:
 565	case e1000_82544:
 566	case e1000_82540:
 567	case e1000_82541:
 568	case e1000_82541_rev_2:
 569		legacy_pba_adjust = true;
 570		pba = E1000_PBA_48K;
 571		break;
 572	case e1000_82545:
 573	case e1000_82545_rev_3:
 574	case e1000_82546:
 575	case e1000_ce4100:
 576	case e1000_82546_rev_3:
 577		pba = E1000_PBA_48K;
 578		break;
 579	case e1000_82547:
 580	case e1000_82547_rev_2:
 581		legacy_pba_adjust = true;
 582		pba = E1000_PBA_30K;
 583		break;
 584	case e1000_undefined:
 585	case e1000_num_macs:
 586		break;
 587	}
 588
 589	if (legacy_pba_adjust) {
 590		if (hw->max_frame_size > E1000_RXBUFFER_8192)
 591			pba -= 8; /* allocate more FIFO for Tx */
 592
 593		if (hw->mac_type == e1000_82547) {
 594			adapter->tx_fifo_head = 0;
 595			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 596			adapter->tx_fifo_size =
 597				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 598			atomic_set(&adapter->tx_fifo_stall, 0);
 599		}
 600	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
 601		/* adjust PBA for jumbo frames */
 602		ew32(PBA, pba);
 603
 604		/* To maintain wire speed transmits, the Tx FIFO should be
 605		 * large enough to accommodate two full transmit packets,
 606		 * rounded up to the next 1KB and expressed in KB.  Likewise,
 607		 * the Rx FIFO should be large enough to accommodate at least
 608		 * one full receive packet and is similarly rounded up and
 609		 * expressed in KB.
 610		 */
 611		pba = er32(PBA);
 612		/* upper 16 bits has Tx packet buffer allocation size in KB */
 613		tx_space = pba >> 16;
 614		/* lower 16 bits has Rx packet buffer allocation size in KB */
 615		pba &= 0xffff;
 616		/* the Tx fifo also stores 16 bytes of information about the Tx
 617		 * but don't include ethernet FCS because hardware appends it
 618		 */
 619		min_tx_space = (hw->max_frame_size +
 620				sizeof(struct e1000_tx_desc) -
 621				ETH_FCS_LEN) * 2;
 622		min_tx_space = ALIGN(min_tx_space, 1024);
 623		min_tx_space >>= 10;
 624		/* software strips receive CRC, so leave room for it */
 625		min_rx_space = hw->max_frame_size;
 626		min_rx_space = ALIGN(min_rx_space, 1024);
 627		min_rx_space >>= 10;
 628
 629		/* If current Tx allocation is less than the min Tx FIFO size,
 630		 * and the min Tx FIFO size is less than the current Rx FIFO
 631		 * allocation, take space away from current Rx allocation
 632		 */
 633		if (tx_space < min_tx_space &&
 634		    ((min_tx_space - tx_space) < pba)) {
 635			pba = pba - (min_tx_space - tx_space);
 636
 637			/* PCI/PCIx hardware has PBA alignment constraints */
 638			switch (hw->mac_type) {
 639			case e1000_82545 ... e1000_82546_rev_3:
 640				pba &= ~(E1000_PBA_8K - 1);
 641				break;
 642			default:
 643				break;
 644			}
 645
 646			/* if short on Rx space, Rx wins and must trump Tx
 647			 * adjustment or use Early Receive if available
 648			 */
 649			if (pba < min_rx_space)
 650				pba = min_rx_space;
 651		}
 652	}
 653
 654	ew32(PBA, pba);
 655
 656	/* flow control settings:
 657	 * The high water mark must be low enough to fit one full frame
 658	 * (or the size used for early receive) above it in the Rx FIFO.
 659	 * Set it to the lower of:
 660	 * - 90% of the Rx FIFO size, and
 661	 * - the full Rx FIFO size minus the early receive size (for parts
 662	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 663	 * - the full Rx FIFO size minus one full frame
 664	 */
 665	hwm = min(((pba << 10) * 9 / 10),
 666		  ((pba << 10) - hw->max_frame_size));
 667
 668	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
 669	hw->fc_low_water = hw->fc_high_water - 8;
 670	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 671	hw->fc_send_xon = 1;
 672	hw->fc = hw->original_fc;
 673
 674	/* Allow time for pending master requests to run */
 675	e1000_reset_hw(hw);
 676	if (hw->mac_type >= e1000_82544)
 677		ew32(WUC, 0);
 678
 679	if (e1000_init_hw(hw))
 680		e_dev_err("Hardware Error\n");
 681	e1000_update_mng_vlan(adapter);
 682
 683	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 684	if (hw->mac_type >= e1000_82544 &&
 685	    hw->autoneg == 1 &&
 686	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 687		u32 ctrl = er32(CTRL);
 688		/* clear phy power management bit if we are in gig only mode,
 689		 * which if enabled will attempt negotiation to 100Mb, which
 690		 * can cause a loss of link at power off or driver unload
 691		 */
 692		ctrl &= ~E1000_CTRL_SWDPIN3;
 693		ew32(CTRL, ctrl);
 694	}
 695
 696	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 697	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 698
 699	e1000_reset_adaptive(hw);
 700	e1000_phy_get_info(hw, &adapter->phy_info);
 701
 702	e1000_release_manageability(adapter);
 703}
 704
 705/* Dump the eeprom for users having checksum issues */
 706static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 707{
 708	struct net_device *netdev = adapter->netdev;
 709	struct ethtool_eeprom eeprom;
 710	const struct ethtool_ops *ops = netdev->ethtool_ops;
 711	u8 *data;
 712	int i;
 713	u16 csum_old, csum_new = 0;
 714
 715	eeprom.len = ops->get_eeprom_len(netdev);
 716	eeprom.offset = 0;
 717
 718	data = kmalloc(eeprom.len, GFP_KERNEL);
 719	if (!data)
 720		return;
 721
 722	ops->get_eeprom(netdev, &eeprom, data);
 723
 724	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 725		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 726	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 727		csum_new += data[i] + (data[i + 1] << 8);
 728	csum_new = EEPROM_SUM - csum_new;
 729
 730	pr_err("/*********************/\n");
 731	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 732	pr_err("Calculated              : 0x%04x\n", csum_new);
 733
 734	pr_err("Offset    Values\n");
 735	pr_err("========  ======\n");
 736	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 737
 738	pr_err("Include this output when contacting your support provider.\n");
 739	pr_err("This is not a software error! Something bad happened to\n");
 740	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 741	pr_err("result in further problems, possibly loss of data,\n");
 742	pr_err("corruption or system hangs!\n");
 743	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 744	pr_err("which is invalid and requires you to set the proper MAC\n");
 745	pr_err("address manually before continuing to enable this network\n");
 746	pr_err("device. Please inspect the EEPROM dump and report the\n");
 747	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 748	pr_err("/*********************/\n");
 749
 750	kfree(data);
 751}
 752
 753/**
 754 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 755 * @pdev: PCI device information struct
 756 *
 757 * Return true if an adapter needs ioport resources
 758 **/
 759static int e1000_is_need_ioport(struct pci_dev *pdev)
 760{
 761	switch (pdev->device) {
 762	case E1000_DEV_ID_82540EM:
 763	case E1000_DEV_ID_82540EM_LOM:
 764	case E1000_DEV_ID_82540EP:
 765	case E1000_DEV_ID_82540EP_LOM:
 766	case E1000_DEV_ID_82540EP_LP:
 767	case E1000_DEV_ID_82541EI:
 768	case E1000_DEV_ID_82541EI_MOBILE:
 769	case E1000_DEV_ID_82541ER:
 770	case E1000_DEV_ID_82541ER_LOM:
 771	case E1000_DEV_ID_82541GI:
 772	case E1000_DEV_ID_82541GI_LF:
 773	case E1000_DEV_ID_82541GI_MOBILE:
 774	case E1000_DEV_ID_82544EI_COPPER:
 775	case E1000_DEV_ID_82544EI_FIBER:
 776	case E1000_DEV_ID_82544GC_COPPER:
 777	case E1000_DEV_ID_82544GC_LOM:
 778	case E1000_DEV_ID_82545EM_COPPER:
 779	case E1000_DEV_ID_82545EM_FIBER:
 780	case E1000_DEV_ID_82546EB_COPPER:
 781	case E1000_DEV_ID_82546EB_FIBER:
 782	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 783		return true;
 784	default:
 785		return false;
 786	}
 787}
 788
 789static netdev_features_t e1000_fix_features(struct net_device *netdev,
 790	netdev_features_t features)
 791{
 792	/* Since there is no support for separate Rx/Tx vlan accel
 793	 * enable/disable make sure Tx flag is always in same state as Rx.
 794	 */
 795	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 796		features |= NETIF_F_HW_VLAN_CTAG_TX;
 797	else
 798		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 799
 800	return features;
 801}
 802
 803static int e1000_set_features(struct net_device *netdev,
 804	netdev_features_t features)
 805{
 806	struct e1000_adapter *adapter = netdev_priv(netdev);
 807	netdev_features_t changed = features ^ netdev->features;
 808
 809	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 810		e1000_vlan_mode(netdev, features);
 811
 812	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 813		return 0;
 814
 815	netdev->features = features;
 816	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 817
 818	if (netif_running(netdev))
 819		e1000_reinit_locked(adapter);
 820	else
 821		e1000_reset(adapter);
 822
 823	return 1;
 824}
 825
 826static const struct net_device_ops e1000_netdev_ops = {
 827	.ndo_open		= e1000_open,
 828	.ndo_stop		= e1000_close,
 829	.ndo_start_xmit		= e1000_xmit_frame,
 830	.ndo_set_rx_mode	= e1000_set_rx_mode,
 831	.ndo_set_mac_address	= e1000_set_mac,
 832	.ndo_tx_timeout		= e1000_tx_timeout,
 833	.ndo_change_mtu		= e1000_change_mtu,
 834	.ndo_eth_ioctl		= e1000_ioctl,
 835	.ndo_validate_addr	= eth_validate_addr,
 836	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
 837	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
 838#ifdef CONFIG_NET_POLL_CONTROLLER
 839	.ndo_poll_controller	= e1000_netpoll,
 840#endif
 841	.ndo_fix_features	= e1000_fix_features,
 842	.ndo_set_features	= e1000_set_features,
 843};
 844
 845/**
 846 * e1000_init_hw_struct - initialize members of hw struct
 847 * @adapter: board private struct
 848 * @hw: structure used by e1000_hw.c
 849 *
 850 * Factors out initialization of the e1000_hw struct to its own function
 851 * that can be called very early at init (just after struct allocation).
 852 * Fields are initialized based on PCI device information and
 853 * OS network device settings (MTU size).
 854 * Returns negative error codes if MAC type setup fails.
 855 */
 856static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 857				struct e1000_hw *hw)
 858{
 859	struct pci_dev *pdev = adapter->pdev;
 860
 861	/* PCI config space info */
 862	hw->vendor_id = pdev->vendor;
 863	hw->device_id = pdev->device;
 864	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 865	hw->subsystem_id = pdev->subsystem_device;
 866	hw->revision_id = pdev->revision;
 867
 868	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 869
 870	hw->max_frame_size = adapter->netdev->mtu +
 871			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 872	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 873
 874	/* identify the MAC */
 875	if (e1000_set_mac_type(hw)) {
 876		e_err(probe, "Unknown MAC Type\n");
 877		return -EIO;
 878	}
 879
 880	switch (hw->mac_type) {
 881	default:
 882		break;
 883	case e1000_82541:
 884	case e1000_82547:
 885	case e1000_82541_rev_2:
 886	case e1000_82547_rev_2:
 887		hw->phy_init_script = 1;
 888		break;
 889	}
 890
 891	e1000_set_media_type(hw);
 892	e1000_get_bus_info(hw);
 893
 894	hw->wait_autoneg_complete = false;
 895	hw->tbi_compatibility_en = true;
 896	hw->adaptive_ifs = true;
 897
 898	/* Copper options */
 899
 900	if (hw->media_type == e1000_media_type_copper) {
 901		hw->mdix = AUTO_ALL_MODES;
 902		hw->disable_polarity_correction = false;
 903		hw->master_slave = E1000_MASTER_SLAVE;
 904	}
 905
 906	return 0;
 907}
 908
 909/**
 910 * e1000_probe - Device Initialization Routine
 911 * @pdev: PCI device information struct
 912 * @ent: entry in e1000_pci_tbl
 913 *
 914 * Returns 0 on success, negative on failure
 915 *
 916 * e1000_probe initializes an adapter identified by a pci_dev structure.
 917 * The OS initialization, configuring of the adapter private structure,
 918 * and a hardware reset occur.
 919 **/
 920static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 921{
 922	struct net_device *netdev;
 923	struct e1000_adapter *adapter = NULL;
 924	struct e1000_hw *hw;
 925
 926	static int cards_found;
 927	static int global_quad_port_a; /* global ksp3 port a indication */
 928	int i, err, pci_using_dac;
 929	u16 eeprom_data = 0;
 930	u16 tmp = 0;
 931	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 932	int bars, need_ioport;
 933	bool disable_dev = false;
 934
 935	/* do not allocate ioport bars when not needed */
 936	need_ioport = e1000_is_need_ioport(pdev);
 937	if (need_ioport) {
 938		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 939		err = pci_enable_device(pdev);
 940	} else {
 941		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 942		err = pci_enable_device_mem(pdev);
 943	}
 944	if (err)
 945		return err;
 946
 947	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 948	if (err)
 949		goto err_pci_reg;
 950
 951	pci_set_master(pdev);
 952	err = pci_save_state(pdev);
 953	if (err)
 954		goto err_alloc_etherdev;
 955
 956	err = -ENOMEM;
 957	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 958	if (!netdev)
 959		goto err_alloc_etherdev;
 960
 961	SET_NETDEV_DEV(netdev, &pdev->dev);
 962
 963	pci_set_drvdata(pdev, netdev);
 964	adapter = netdev_priv(netdev);
 965	adapter->netdev = netdev;
 966	adapter->pdev = pdev;
 967	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 968	adapter->bars = bars;
 969	adapter->need_ioport = need_ioport;
 970
 971	hw = &adapter->hw;
 972	hw->back = adapter;
 973
 974	err = -EIO;
 975	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 976	if (!hw->hw_addr)
 977		goto err_ioremap;
 978
 979	if (adapter->need_ioport) {
 980		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
 981			if (pci_resource_len(pdev, i) == 0)
 982				continue;
 983			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 984				hw->io_base = pci_resource_start(pdev, i);
 985				break;
 986			}
 987		}
 988	}
 989
 990	/* make ready for any if (hw->...) below */
 991	err = e1000_init_hw_struct(adapter, hw);
 992	if (err)
 993		goto err_sw_init;
 994
 995	/* there is a workaround being applied below that limits
 996	 * 64-bit DMA addresses to 64-bit hardware.  There are some
 997	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
 998	 */
 999	pci_using_dac = 0;
1000	if ((hw->bus_type == e1000_bus_type_pcix) &&
1001	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1002		pci_using_dac = 1;
1003	} else {
1004		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1005		if (err) {
1006			pr_err("No usable DMA config, aborting\n");
1007			goto err_dma;
1008		}
1009	}
1010
1011	netdev->netdev_ops = &e1000_netdev_ops;
1012	e1000_set_ethtool_ops(netdev);
1013	netdev->watchdog_timeo = 5 * HZ;
1014	netif_napi_add(netdev, &adapter->napi, e1000_clean);
1015
1016	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
1017
1018	adapter->bd_number = cards_found;
1019
1020	/* setup the private structure */
1021
1022	err = e1000_sw_init(adapter);
1023	if (err)
1024		goto err_sw_init;
1025
1026	err = -EIO;
1027	if (hw->mac_type == e1000_ce4100) {
1028		hw->ce4100_gbe_mdio_base_virt =
1029					ioremap(pci_resource_start(pdev, BAR_1),
1030						pci_resource_len(pdev, BAR_1));
1031
1032		if (!hw->ce4100_gbe_mdio_base_virt)
1033			goto err_mdio_ioremap;
1034	}
1035
1036	if (hw->mac_type >= e1000_82543) {
1037		netdev->hw_features = NETIF_F_SG |
1038				   NETIF_F_HW_CSUM |
1039				   NETIF_F_HW_VLAN_CTAG_RX;
1040		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1041				   NETIF_F_HW_VLAN_CTAG_FILTER;
1042	}
1043
1044	if ((hw->mac_type >= e1000_82544) &&
1045	   (hw->mac_type != e1000_82547))
1046		netdev->hw_features |= NETIF_F_TSO;
1047
1048	netdev->priv_flags |= IFF_SUPP_NOFCS;
1049
1050	netdev->features |= netdev->hw_features;
1051	netdev->hw_features |= (NETIF_F_RXCSUM |
1052				NETIF_F_RXALL |
1053				NETIF_F_RXFCS);
1054
1055	if (pci_using_dac) {
1056		netdev->features |= NETIF_F_HIGHDMA;
1057		netdev->vlan_features |= NETIF_F_HIGHDMA;
1058	}
1059
1060	netdev->vlan_features |= (NETIF_F_TSO |
1061				  NETIF_F_HW_CSUM |
1062				  NETIF_F_SG);
1063
1064	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1065	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1066	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1067		netdev->priv_flags |= IFF_UNICAST_FLT;
1068
1069	/* MTU range: 46 - 16110 */
1070	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1071	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1072
1073	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1074
1075	/* initialize eeprom parameters */
1076	if (e1000_init_eeprom_params(hw)) {
1077		e_err(probe, "EEPROM initialization failed\n");
1078		goto err_eeprom;
1079	}
1080
1081	/* before reading the EEPROM, reset the controller to
1082	 * put the device in a known good starting state
1083	 */
1084
1085	e1000_reset_hw(hw);
1086
1087	/* make sure the EEPROM is good */
1088	if (e1000_validate_eeprom_checksum(hw) < 0) {
1089		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1090		e1000_dump_eeprom(adapter);
1091		/* set MAC address to all zeroes to invalidate and temporary
1092		 * disable this device for the user. This blocks regular
1093		 * traffic while still permitting ethtool ioctls from reaching
1094		 * the hardware as well as allowing the user to run the
1095		 * interface after manually setting a hw addr using
1096		 * `ip set address`
1097		 */
1098		memset(hw->mac_addr, 0, netdev->addr_len);
1099	} else {
1100		/* copy the MAC address out of the EEPROM */
1101		if (e1000_read_mac_addr(hw))
1102			e_err(probe, "EEPROM Read Error\n");
1103	}
1104	/* don't block initialization here due to bad MAC address */
1105	eth_hw_addr_set(netdev, hw->mac_addr);
1106
1107	if (!is_valid_ether_addr(netdev->dev_addr))
1108		e_err(probe, "Invalid MAC Address\n");
1109
1110
1111	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1112	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1113			  e1000_82547_tx_fifo_stall_task);
1114	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1115	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1116
1117	e1000_check_options(adapter);
1118
1119	/* Initial Wake on LAN setting
1120	 * If APM wake is enabled in the EEPROM,
1121	 * enable the ACPI Magic Packet filter
1122	 */
1123
1124	switch (hw->mac_type) {
1125	case e1000_82542_rev2_0:
1126	case e1000_82542_rev2_1:
1127	case e1000_82543:
1128		break;
1129	case e1000_82544:
1130		e1000_read_eeprom(hw,
1131			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1132		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1133		break;
1134	case e1000_82546:
1135	case e1000_82546_rev_3:
1136		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1137			e1000_read_eeprom(hw,
1138				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1139			break;
1140		}
1141		fallthrough;
1142	default:
1143		e1000_read_eeprom(hw,
1144			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1145		break;
1146	}
1147	if (eeprom_data & eeprom_apme_mask)
1148		adapter->eeprom_wol |= E1000_WUFC_MAG;
1149
1150	/* now that we have the eeprom settings, apply the special cases
1151	 * where the eeprom may be wrong or the board simply won't support
1152	 * wake on lan on a particular port
1153	 */
1154	switch (pdev->device) {
1155	case E1000_DEV_ID_82546GB_PCIE:
1156		adapter->eeprom_wol = 0;
1157		break;
1158	case E1000_DEV_ID_82546EB_FIBER:
1159	case E1000_DEV_ID_82546GB_FIBER:
1160		/* Wake events only supported on port A for dual fiber
1161		 * regardless of eeprom setting
1162		 */
1163		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1164			adapter->eeprom_wol = 0;
1165		break;
1166	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1167		/* if quad port adapter, disable WoL on all but port A */
1168		if (global_quad_port_a != 0)
1169			adapter->eeprom_wol = 0;
1170		else
1171			adapter->quad_port_a = true;
1172		/* Reset for multiple quad port adapters */
1173		if (++global_quad_port_a == 4)
1174			global_quad_port_a = 0;
1175		break;
1176	}
1177
1178	/* initialize the wol settings based on the eeprom settings */
1179	adapter->wol = adapter->eeprom_wol;
1180	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1181
1182	/* Auto detect PHY address */
1183	if (hw->mac_type == e1000_ce4100) {
1184		for (i = 0; i < 32; i++) {
1185			hw->phy_addr = i;
1186			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1187
1188			if (tmp != 0 && tmp != 0xFF)
1189				break;
1190		}
1191
1192		if (i >= 32)
1193			goto err_eeprom;
1194	}
1195
1196	/* reset the hardware with the new settings */
1197	e1000_reset(adapter);
1198
1199	strcpy(netdev->name, "eth%d");
1200	err = register_netdev(netdev);
1201	if (err)
1202		goto err_register;
1203
1204	e1000_vlan_filter_on_off(adapter, false);
1205
1206	/* print bus type/speed/width info */
1207	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1208	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1209	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1210		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1211		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1212		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1213	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1214	       netdev->dev_addr);
1215
1216	/* carrier off reporting is important to ethtool even BEFORE open */
1217	netif_carrier_off(netdev);
1218
1219	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1220
1221	cards_found++;
1222	return 0;
1223
1224err_register:
1225err_eeprom:
1226	e1000_phy_hw_reset(hw);
1227
1228	if (hw->flash_address)
1229		iounmap(hw->flash_address);
1230	kfree(adapter->tx_ring);
1231	kfree(adapter->rx_ring);
1232err_dma:
1233err_sw_init:
1234err_mdio_ioremap:
1235	iounmap(hw->ce4100_gbe_mdio_base_virt);
1236	iounmap(hw->hw_addr);
1237err_ioremap:
1238	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1239	free_netdev(netdev);
1240err_alloc_etherdev:
1241	pci_release_selected_regions(pdev, bars);
1242err_pci_reg:
1243	if (!adapter || disable_dev)
1244		pci_disable_device(pdev);
1245	return err;
1246}
1247
1248/**
1249 * e1000_remove - Device Removal Routine
1250 * @pdev: PCI device information struct
1251 *
1252 * e1000_remove is called by the PCI subsystem to alert the driver
1253 * that it should release a PCI device. That could be caused by a
1254 * Hot-Plug event, or because the driver is going to be removed from
1255 * memory.
1256 **/
1257static void e1000_remove(struct pci_dev *pdev)
1258{
1259	struct net_device *netdev = pci_get_drvdata(pdev);
1260	struct e1000_adapter *adapter = netdev_priv(netdev);
1261	struct e1000_hw *hw = &adapter->hw;
1262	bool disable_dev;
1263
1264	e1000_down_and_stop(adapter);
1265	e1000_release_manageability(adapter);
1266
1267	unregister_netdev(netdev);
1268
1269	e1000_phy_hw_reset(hw);
1270
1271	kfree(adapter->tx_ring);
1272	kfree(adapter->rx_ring);
1273
1274	if (hw->mac_type == e1000_ce4100)
1275		iounmap(hw->ce4100_gbe_mdio_base_virt);
1276	iounmap(hw->hw_addr);
1277	if (hw->flash_address)
1278		iounmap(hw->flash_address);
1279	pci_release_selected_regions(pdev, adapter->bars);
1280
1281	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1282	free_netdev(netdev);
1283
1284	if (disable_dev)
1285		pci_disable_device(pdev);
1286}
1287
1288/**
1289 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1290 * @adapter: board private structure to initialize
1291 *
1292 * e1000_sw_init initializes the Adapter private data structure.
1293 * e1000_init_hw_struct MUST be called before this function
1294 **/
1295static int e1000_sw_init(struct e1000_adapter *adapter)
1296{
1297	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1298
1299	adapter->num_tx_queues = 1;
1300	adapter->num_rx_queues = 1;
1301
1302	if (e1000_alloc_queues(adapter)) {
1303		e_err(probe, "Unable to allocate memory for queues\n");
1304		return -ENOMEM;
1305	}
1306
1307	/* Explicitly disable IRQ since the NIC can be in any state. */
1308	e1000_irq_disable(adapter);
1309
1310	spin_lock_init(&adapter->stats_lock);
1311
1312	set_bit(__E1000_DOWN, &adapter->flags);
1313
1314	return 0;
1315}
1316
1317/**
1318 * e1000_alloc_queues - Allocate memory for all rings
1319 * @adapter: board private structure to initialize
1320 *
1321 * We allocate one ring per queue at run-time since we don't know the
1322 * number of queues at compile-time.
1323 **/
1324static int e1000_alloc_queues(struct e1000_adapter *adapter)
1325{
1326	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1327				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1328	if (!adapter->tx_ring)
1329		return -ENOMEM;
1330
1331	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1332				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1333	if (!adapter->rx_ring) {
1334		kfree(adapter->tx_ring);
1335		return -ENOMEM;
1336	}
1337
1338	return E1000_SUCCESS;
1339}
1340
1341/**
1342 * e1000_open - Called when a network interface is made active
1343 * @netdev: network interface device structure
1344 *
1345 * Returns 0 on success, negative value on failure
1346 *
1347 * The open entry point is called when a network interface is made
1348 * active by the system (IFF_UP).  At this point all resources needed
1349 * for transmit and receive operations are allocated, the interrupt
1350 * handler is registered with the OS, the watchdog task is started,
1351 * and the stack is notified that the interface is ready.
1352 **/
1353int e1000_open(struct net_device *netdev)
1354{
1355	struct e1000_adapter *adapter = netdev_priv(netdev);
1356	struct e1000_hw *hw = &adapter->hw;
1357	int err;
1358
1359	/* disallow open during test */
1360	if (test_bit(__E1000_TESTING, &adapter->flags))
1361		return -EBUSY;
1362
1363	netif_carrier_off(netdev);
1364
1365	/* allocate transmit descriptors */
1366	err = e1000_setup_all_tx_resources(adapter);
1367	if (err)
1368		goto err_setup_tx;
1369
1370	/* allocate receive descriptors */
1371	err = e1000_setup_all_rx_resources(adapter);
1372	if (err)
1373		goto err_setup_rx;
1374
1375	e1000_power_up_phy(adapter);
1376
1377	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1378	if ((hw->mng_cookie.status &
1379			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1380		e1000_update_mng_vlan(adapter);
1381	}
1382
1383	/* before we allocate an interrupt, we must be ready to handle it.
1384	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1385	 * as soon as we call pci_request_irq, so we have to setup our
1386	 * clean_rx handler before we do so.
1387	 */
1388	e1000_configure(adapter);
1389
1390	err = e1000_request_irq(adapter);
1391	if (err)
1392		goto err_req_irq;
1393
1394	/* From here on the code is the same as e1000_up() */
1395	clear_bit(__E1000_DOWN, &adapter->flags);
1396
1397	netif_napi_set_irq(&adapter->napi, adapter->pdev->irq);
1398	napi_enable(&adapter->napi);
1399	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_RX, &adapter->napi);
1400	netif_queue_set_napi(netdev, 0, NETDEV_QUEUE_TYPE_TX, &adapter->napi);
1401
1402	e1000_irq_enable(adapter);
1403
1404	netif_start_queue(netdev);
1405
1406	/* fire a link status change interrupt to start the watchdog */
1407	ew32(ICS, E1000_ICS_LSC);
1408
1409	return E1000_SUCCESS;
1410
1411err_req_irq:
1412	e1000_power_down_phy(adapter);
1413	e1000_free_all_rx_resources(adapter);
1414err_setup_rx:
1415	e1000_free_all_tx_resources(adapter);
1416err_setup_tx:
1417	e1000_reset(adapter);
1418
1419	return err;
1420}
1421
1422/**
1423 * e1000_close - Disables a network interface
1424 * @netdev: network interface device structure
1425 *
1426 * Returns 0, this is not allowed to fail
1427 *
1428 * The close entry point is called when an interface is de-activated
1429 * by the OS.  The hardware is still under the drivers control, but
1430 * needs to be disabled.  A global MAC reset is issued to stop the
1431 * hardware, and all transmit and receive resources are freed.
1432 **/
1433int e1000_close(struct net_device *netdev)
1434{
1435	struct e1000_adapter *adapter = netdev_priv(netdev);
1436	struct e1000_hw *hw = &adapter->hw;
1437	int count = E1000_CHECK_RESET_COUNT;
1438
1439	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1440		usleep_range(10000, 20000);
1441
1442	WARN_ON(count < 0);
1443
1444	/* signal that we're down so that the reset task will no longer run */
1445	set_bit(__E1000_DOWN, &adapter->flags);
1446	clear_bit(__E1000_RESETTING, &adapter->flags);
1447
1448	e1000_down(adapter);
1449	e1000_power_down_phy(adapter);
1450	e1000_free_irq(adapter);
1451
1452	e1000_free_all_tx_resources(adapter);
1453	e1000_free_all_rx_resources(adapter);
1454
1455	/* kill manageability vlan ID if supported, but not if a vlan with
1456	 * the same ID is registered on the host OS (let 8021q kill it)
1457	 */
1458	if ((hw->mng_cookie.status &
1459	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1460	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1461		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1462				       adapter->mng_vlan_id);
1463	}
1464
1465	return 0;
1466}
1467
1468/**
1469 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1470 * @adapter: address of board private structure
1471 * @start: address of beginning of memory
1472 * @len: length of memory
1473 **/
1474static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1475				  unsigned long len)
1476{
1477	struct e1000_hw *hw = &adapter->hw;
1478	unsigned long begin = (unsigned long)start;
1479	unsigned long end = begin + len;
1480
1481	/* First rev 82545 and 82546 need to not allow any memory
1482	 * write location to cross 64k boundary due to errata 23
1483	 */
1484	if (hw->mac_type == e1000_82545 ||
1485	    hw->mac_type == e1000_ce4100 ||
1486	    hw->mac_type == e1000_82546) {
1487		return ((begin ^ (end - 1)) >> 16) == 0;
1488	}
1489
1490	return true;
1491}
1492
1493/**
1494 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1495 * @adapter: board private structure
1496 * @txdr:    tx descriptor ring (for a specific queue) to setup
1497 *
1498 * Return 0 on success, negative on failure
1499 **/
1500static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1501				    struct e1000_tx_ring *txdr)
1502{
1503	struct pci_dev *pdev = adapter->pdev;
1504	int size;
1505
1506	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1507	txdr->buffer_info = vzalloc(size);
1508	if (!txdr->buffer_info)
1509		return -ENOMEM;
1510
1511	/* round up to nearest 4K */
1512
1513	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1514	txdr->size = ALIGN(txdr->size, 4096);
1515
1516	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1517					GFP_KERNEL);
1518	if (!txdr->desc) {
1519setup_tx_desc_die:
1520		vfree(txdr->buffer_info);
1521		return -ENOMEM;
1522	}
1523
1524	/* Fix for errata 23, can't cross 64kB boundary */
1525	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1526		void *olddesc = txdr->desc;
1527		dma_addr_t olddma = txdr->dma;
1528		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1529		      txdr->size, txdr->desc);
1530		/* Try again, without freeing the previous */
1531		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1532						&txdr->dma, GFP_KERNEL);
1533		/* Failed allocation, critical failure */
1534		if (!txdr->desc) {
1535			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1536					  olddma);
1537			goto setup_tx_desc_die;
1538		}
1539
1540		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1541			/* give up */
1542			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1543					  txdr->dma);
1544			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1545					  olddma);
1546			e_err(probe, "Unable to allocate aligned memory "
1547			      "for the transmit descriptor ring\n");
1548			vfree(txdr->buffer_info);
1549			return -ENOMEM;
1550		} else {
1551			/* Free old allocation, new allocation was successful */
1552			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1553					  olddma);
1554		}
1555	}
1556	memset(txdr->desc, 0, txdr->size);
1557
1558	txdr->next_to_use = 0;
1559	txdr->next_to_clean = 0;
1560
1561	return 0;
1562}
1563
1564/**
1565 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1566 * 				  (Descriptors) for all queues
1567 * @adapter: board private structure
1568 *
1569 * Return 0 on success, negative on failure
1570 **/
1571int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1572{
1573	int i, err = 0;
1574
1575	for (i = 0; i < adapter->num_tx_queues; i++) {
1576		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1577		if (err) {
1578			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1579			for (i-- ; i >= 0; i--)
1580				e1000_free_tx_resources(adapter,
1581							&adapter->tx_ring[i]);
1582			break;
1583		}
1584	}
1585
1586	return err;
1587}
1588
1589/**
1590 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1591 * @adapter: board private structure
1592 *
1593 * Configure the Tx unit of the MAC after a reset.
1594 **/
1595static void e1000_configure_tx(struct e1000_adapter *adapter)
1596{
1597	u64 tdba;
1598	struct e1000_hw *hw = &adapter->hw;
1599	u32 tdlen, tctl, tipg;
1600	u32 ipgr1, ipgr2;
1601
1602	/* Setup the HW Tx Head and Tail descriptor pointers */
1603
1604	switch (adapter->num_tx_queues) {
1605	case 1:
1606	default:
1607		tdba = adapter->tx_ring[0].dma;
1608		tdlen = adapter->tx_ring[0].count *
1609			sizeof(struct e1000_tx_desc);
1610		ew32(TDLEN, tdlen);
1611		ew32(TDBAH, (tdba >> 32));
1612		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1613		ew32(TDT, 0);
1614		ew32(TDH, 0);
1615		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1616					   E1000_TDH : E1000_82542_TDH);
1617		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1618					   E1000_TDT : E1000_82542_TDT);
1619		break;
1620	}
1621
1622	/* Set the default values for the Tx Inter Packet Gap timer */
1623	if ((hw->media_type == e1000_media_type_fiber ||
1624	     hw->media_type == e1000_media_type_internal_serdes))
1625		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1626	else
1627		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1628
1629	switch (hw->mac_type) {
1630	case e1000_82542_rev2_0:
1631	case e1000_82542_rev2_1:
1632		tipg = DEFAULT_82542_TIPG_IPGT;
1633		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1634		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1635		break;
1636	default:
1637		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1638		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1639		break;
1640	}
1641	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1642	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1643	ew32(TIPG, tipg);
1644
1645	/* Set the Tx Interrupt Delay register */
1646
1647	ew32(TIDV, adapter->tx_int_delay);
1648	if (hw->mac_type >= e1000_82540)
1649		ew32(TADV, adapter->tx_abs_int_delay);
1650
1651	/* Program the Transmit Control Register */
1652
1653	tctl = er32(TCTL);
1654	tctl &= ~E1000_TCTL_CT;
1655	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1656		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1657
1658	e1000_config_collision_dist(hw);
1659
1660	/* Setup Transmit Descriptor Settings for eop descriptor */
1661	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1662
1663	/* only set IDE if we are delaying interrupts using the timers */
1664	if (adapter->tx_int_delay)
1665		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1666
1667	if (hw->mac_type < e1000_82543)
1668		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1669	else
1670		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1671
1672	/* Cache if we're 82544 running in PCI-X because we'll
1673	 * need this to apply a workaround later in the send path.
1674	 */
1675	if (hw->mac_type == e1000_82544 &&
1676	    hw->bus_type == e1000_bus_type_pcix)
1677		adapter->pcix_82544 = true;
1678
1679	ew32(TCTL, tctl);
1680
1681}
1682
1683/**
1684 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1685 * @adapter: board private structure
1686 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1687 *
1688 * Returns 0 on success, negative on failure
1689 **/
1690static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1691				    struct e1000_rx_ring *rxdr)
1692{
1693	struct pci_dev *pdev = adapter->pdev;
1694	int size, desc_len;
1695
1696	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1697	rxdr->buffer_info = vzalloc(size);
1698	if (!rxdr->buffer_info)
1699		return -ENOMEM;
1700
1701	desc_len = sizeof(struct e1000_rx_desc);
1702
1703	/* Round up to nearest 4K */
1704
1705	rxdr->size = rxdr->count * desc_len;
1706	rxdr->size = ALIGN(rxdr->size, 4096);
1707
1708	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1709					GFP_KERNEL);
1710	if (!rxdr->desc) {
1711setup_rx_desc_die:
1712		vfree(rxdr->buffer_info);
1713		return -ENOMEM;
1714	}
1715
1716	/* Fix for errata 23, can't cross 64kB boundary */
1717	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1718		void *olddesc = rxdr->desc;
1719		dma_addr_t olddma = rxdr->dma;
1720		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1721		      rxdr->size, rxdr->desc);
1722		/* Try again, without freeing the previous */
1723		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1724						&rxdr->dma, GFP_KERNEL);
1725		/* Failed allocation, critical failure */
1726		if (!rxdr->desc) {
1727			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1728					  olddma);
1729			goto setup_rx_desc_die;
1730		}
1731
1732		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1733			/* give up */
1734			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1735					  rxdr->dma);
1736			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1737					  olddma);
1738			e_err(probe, "Unable to allocate aligned memory for "
1739			      "the Rx descriptor ring\n");
1740			goto setup_rx_desc_die;
1741		} else {
1742			/* Free old allocation, new allocation was successful */
1743			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1744					  olddma);
1745		}
1746	}
1747	memset(rxdr->desc, 0, rxdr->size);
1748
1749	rxdr->next_to_clean = 0;
1750	rxdr->next_to_use = 0;
1751	rxdr->rx_skb_top = NULL;
1752
1753	return 0;
1754}
1755
1756/**
1757 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1758 * 				  (Descriptors) for all queues
1759 * @adapter: board private structure
1760 *
1761 * Return 0 on success, negative on failure
1762 **/
1763int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1764{
1765	int i, err = 0;
1766
1767	for (i = 0; i < adapter->num_rx_queues; i++) {
1768		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1769		if (err) {
1770			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1771			for (i-- ; i >= 0; i--)
1772				e1000_free_rx_resources(adapter,
1773							&adapter->rx_ring[i]);
1774			break;
1775		}
1776	}
1777
1778	return err;
1779}
1780
1781/**
1782 * e1000_setup_rctl - configure the receive control registers
1783 * @adapter: Board private structure
1784 **/
1785static void e1000_setup_rctl(struct e1000_adapter *adapter)
1786{
1787	struct e1000_hw *hw = &adapter->hw;
1788	u32 rctl;
1789
1790	rctl = er32(RCTL);
1791
1792	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1793
1794	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1795		E1000_RCTL_RDMTS_HALF |
1796		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1797
1798	if (hw->tbi_compatibility_on == 1)
1799		rctl |= E1000_RCTL_SBP;
1800	else
1801		rctl &= ~E1000_RCTL_SBP;
1802
1803	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1804		rctl &= ~E1000_RCTL_LPE;
1805	else
1806		rctl |= E1000_RCTL_LPE;
1807
1808	/* Setup buffer sizes */
1809	rctl &= ~E1000_RCTL_SZ_4096;
1810	rctl |= E1000_RCTL_BSEX;
1811	switch (adapter->rx_buffer_len) {
1812	case E1000_RXBUFFER_2048:
1813	default:
1814		rctl |= E1000_RCTL_SZ_2048;
1815		rctl &= ~E1000_RCTL_BSEX;
1816		break;
1817	case E1000_RXBUFFER_4096:
1818		rctl |= E1000_RCTL_SZ_4096;
1819		break;
1820	case E1000_RXBUFFER_8192:
1821		rctl |= E1000_RCTL_SZ_8192;
1822		break;
1823	case E1000_RXBUFFER_16384:
1824		rctl |= E1000_RCTL_SZ_16384;
1825		break;
1826	}
1827
1828	/* This is useful for sniffing bad packets. */
1829	if (adapter->netdev->features & NETIF_F_RXALL) {
1830		/* UPE and MPE will be handled by normal PROMISC logic
1831		 * in e1000e_set_rx_mode
1832		 */
1833		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1834			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1835			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1836
1837		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1838			  E1000_RCTL_DPF | /* Allow filtered pause */
1839			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1840		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1841		 * and that breaks VLANs.
1842		 */
1843	}
1844
1845	ew32(RCTL, rctl);
1846}
1847
1848/**
1849 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1850 * @adapter: board private structure
1851 *
1852 * Configure the Rx unit of the MAC after a reset.
1853 **/
1854static void e1000_configure_rx(struct e1000_adapter *adapter)
1855{
1856	u64 rdba;
1857	struct e1000_hw *hw = &adapter->hw;
1858	u32 rdlen, rctl, rxcsum;
1859
1860	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1861		rdlen = adapter->rx_ring[0].count *
1862			sizeof(struct e1000_rx_desc);
1863		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1864		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1865	} else {
1866		rdlen = adapter->rx_ring[0].count *
1867			sizeof(struct e1000_rx_desc);
1868		adapter->clean_rx = e1000_clean_rx_irq;
1869		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1870	}
1871
1872	/* disable receives while setting up the descriptors */
1873	rctl = er32(RCTL);
1874	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1875
1876	/* set the Receive Delay Timer Register */
1877	ew32(RDTR, adapter->rx_int_delay);
1878
1879	if (hw->mac_type >= e1000_82540) {
1880		ew32(RADV, adapter->rx_abs_int_delay);
1881		if (adapter->itr_setting != 0)
1882			ew32(ITR, 1000000000 / (adapter->itr * 256));
1883	}
1884
1885	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1886	 * the Base and Length of the Rx Descriptor Ring
1887	 */
1888	switch (adapter->num_rx_queues) {
1889	case 1:
1890	default:
1891		rdba = adapter->rx_ring[0].dma;
1892		ew32(RDLEN, rdlen);
1893		ew32(RDBAH, (rdba >> 32));
1894		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1895		ew32(RDT, 0);
1896		ew32(RDH, 0);
1897		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1898					   E1000_RDH : E1000_82542_RDH);
1899		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1900					   E1000_RDT : E1000_82542_RDT);
1901		break;
1902	}
1903
1904	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1905	if (hw->mac_type >= e1000_82543) {
1906		rxcsum = er32(RXCSUM);
1907		if (adapter->rx_csum)
1908			rxcsum |= E1000_RXCSUM_TUOFL;
1909		else
1910			/* don't need to clear IPPCSE as it defaults to 0 */
1911			rxcsum &= ~E1000_RXCSUM_TUOFL;
1912		ew32(RXCSUM, rxcsum);
1913	}
1914
1915	/* Enable Receives */
1916	ew32(RCTL, rctl | E1000_RCTL_EN);
1917}
1918
1919/**
1920 * e1000_free_tx_resources - Free Tx Resources per Queue
1921 * @adapter: board private structure
1922 * @tx_ring: Tx descriptor ring for a specific queue
1923 *
1924 * Free all transmit software resources
1925 **/
1926static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1927				    struct e1000_tx_ring *tx_ring)
1928{
1929	struct pci_dev *pdev = adapter->pdev;
1930
1931	e1000_clean_tx_ring(adapter, tx_ring);
1932
1933	vfree(tx_ring->buffer_info);
1934	tx_ring->buffer_info = NULL;
1935
1936	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1937			  tx_ring->dma);
1938
1939	tx_ring->desc = NULL;
1940}
1941
1942/**
1943 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1944 * @adapter: board private structure
1945 *
1946 * Free all transmit software resources
1947 **/
1948void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1949{
1950	int i;
1951
1952	for (i = 0; i < adapter->num_tx_queues; i++)
1953		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1954}
1955
1956static void
1957e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1958				 struct e1000_tx_buffer *buffer_info,
1959				 int budget)
1960{
1961	if (buffer_info->dma) {
1962		if (buffer_info->mapped_as_page)
1963			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1964				       buffer_info->length, DMA_TO_DEVICE);
1965		else
1966			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1967					 buffer_info->length,
1968					 DMA_TO_DEVICE);
1969		buffer_info->dma = 0;
1970	}
1971	if (buffer_info->skb) {
1972		napi_consume_skb(buffer_info->skb, budget);
1973		buffer_info->skb = NULL;
1974	}
1975	buffer_info->time_stamp = 0;
1976	/* buffer_info must be completely set up in the transmit path */
1977}
1978
1979/**
1980 * e1000_clean_tx_ring - Free Tx Buffers
1981 * @adapter: board private structure
1982 * @tx_ring: ring to be cleaned
1983 **/
1984static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1985				struct e1000_tx_ring *tx_ring)
1986{
1987	struct e1000_hw *hw = &adapter->hw;
1988	struct e1000_tx_buffer *buffer_info;
1989	unsigned long size;
1990	unsigned int i;
1991
1992	/* Free all the Tx ring sk_buffs */
1993
1994	for (i = 0; i < tx_ring->count; i++) {
1995		buffer_info = &tx_ring->buffer_info[i];
1996		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1997	}
1998
1999	netdev_reset_queue(adapter->netdev);
2000	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2001	memset(tx_ring->buffer_info, 0, size);
2002
2003	/* Zero out the descriptor ring */
2004
2005	memset(tx_ring->desc, 0, tx_ring->size);
2006
2007	tx_ring->next_to_use = 0;
2008	tx_ring->next_to_clean = 0;
2009	tx_ring->last_tx_tso = false;
2010
2011	writel(0, hw->hw_addr + tx_ring->tdh);
2012	writel(0, hw->hw_addr + tx_ring->tdt);
2013}
2014
2015/**
2016 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2017 * @adapter: board private structure
2018 **/
2019static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2020{
2021	int i;
2022
2023	for (i = 0; i < adapter->num_tx_queues; i++)
2024		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2025}
2026
2027/**
2028 * e1000_free_rx_resources - Free Rx Resources
2029 * @adapter: board private structure
2030 * @rx_ring: ring to clean the resources from
2031 *
2032 * Free all receive software resources
2033 **/
2034static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2035				    struct e1000_rx_ring *rx_ring)
2036{
2037	struct pci_dev *pdev = adapter->pdev;
2038
2039	e1000_clean_rx_ring(adapter, rx_ring);
2040
2041	vfree(rx_ring->buffer_info);
2042	rx_ring->buffer_info = NULL;
2043
2044	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2045			  rx_ring->dma);
2046
2047	rx_ring->desc = NULL;
2048}
2049
2050/**
2051 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2052 * @adapter: board private structure
2053 *
2054 * Free all receive software resources
2055 **/
2056void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2057{
2058	int i;
2059
2060	for (i = 0; i < adapter->num_rx_queues; i++)
2061		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2062}
2063
2064#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2065static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2066{
2067	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2068		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2069}
2070
2071static void *e1000_alloc_frag(const struct e1000_adapter *a)
2072{
2073	unsigned int len = e1000_frag_len(a);
2074	u8 *data = netdev_alloc_frag(len);
2075
2076	if (likely(data))
2077		data += E1000_HEADROOM;
2078	return data;
2079}
2080
2081/**
2082 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2083 * @adapter: board private structure
2084 * @rx_ring: ring to free buffers from
2085 **/
2086static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2087				struct e1000_rx_ring *rx_ring)
2088{
2089	struct e1000_hw *hw = &adapter->hw;
2090	struct e1000_rx_buffer *buffer_info;
2091	struct pci_dev *pdev = adapter->pdev;
2092	unsigned long size;
2093	unsigned int i;
2094
2095	/* Free all the Rx netfrags */
2096	for (i = 0; i < rx_ring->count; i++) {
2097		buffer_info = &rx_ring->buffer_info[i];
2098		if (adapter->clean_rx == e1000_clean_rx_irq) {
2099			if (buffer_info->dma)
2100				dma_unmap_single(&pdev->dev, buffer_info->dma,
2101						 adapter->rx_buffer_len,
2102						 DMA_FROM_DEVICE);
2103			if (buffer_info->rxbuf.data) {
2104				skb_free_frag(buffer_info->rxbuf.data);
2105				buffer_info->rxbuf.data = NULL;
2106			}
2107		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2108			if (buffer_info->dma)
2109				dma_unmap_page(&pdev->dev, buffer_info->dma,
2110					       adapter->rx_buffer_len,
2111					       DMA_FROM_DEVICE);
2112			if (buffer_info->rxbuf.page) {
2113				put_page(buffer_info->rxbuf.page);
2114				buffer_info->rxbuf.page = NULL;
2115			}
2116		}
2117
2118		buffer_info->dma = 0;
2119	}
2120
2121	/* there also may be some cached data from a chained receive */
2122	napi_free_frags(&adapter->napi);
2123	rx_ring->rx_skb_top = NULL;
2124
2125	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2126	memset(rx_ring->buffer_info, 0, size);
2127
2128	/* Zero out the descriptor ring */
2129	memset(rx_ring->desc, 0, rx_ring->size);
2130
2131	rx_ring->next_to_clean = 0;
2132	rx_ring->next_to_use = 0;
2133
2134	writel(0, hw->hw_addr + rx_ring->rdh);
2135	writel(0, hw->hw_addr + rx_ring->rdt);
2136}
2137
2138/**
2139 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2140 * @adapter: board private structure
2141 **/
2142static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2143{
2144	int i;
2145
2146	for (i = 0; i < adapter->num_rx_queues; i++)
2147		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2148}
2149
2150/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2151 * and memory write and invalidate disabled for certain operations
2152 */
2153static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2154{
2155	struct e1000_hw *hw = &adapter->hw;
2156	struct net_device *netdev = adapter->netdev;
2157	u32 rctl;
2158
2159	e1000_pci_clear_mwi(hw);
2160
2161	rctl = er32(RCTL);
2162	rctl |= E1000_RCTL_RST;
2163	ew32(RCTL, rctl);
2164	E1000_WRITE_FLUSH();
2165	mdelay(5);
2166
2167	if (netif_running(netdev))
2168		e1000_clean_all_rx_rings(adapter);
2169}
2170
2171static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2172{
2173	struct e1000_hw *hw = &adapter->hw;
2174	struct net_device *netdev = adapter->netdev;
2175	u32 rctl;
2176
2177	rctl = er32(RCTL);
2178	rctl &= ~E1000_RCTL_RST;
2179	ew32(RCTL, rctl);
2180	E1000_WRITE_FLUSH();
2181	mdelay(5);
2182
2183	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2184		e1000_pci_set_mwi(hw);
2185
2186	if (netif_running(netdev)) {
2187		/* No need to loop, because 82542 supports only 1 queue */
2188		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2189		e1000_configure_rx(adapter);
2190		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2191	}
2192}
2193
2194/**
2195 * e1000_set_mac - Change the Ethernet Address of the NIC
2196 * @netdev: network interface device structure
2197 * @p: pointer to an address structure
2198 *
2199 * Returns 0 on success, negative on failure
2200 **/
2201static int e1000_set_mac(struct net_device *netdev, void *p)
2202{
2203	struct e1000_adapter *adapter = netdev_priv(netdev);
2204	struct e1000_hw *hw = &adapter->hw;
2205	struct sockaddr *addr = p;
2206
2207	if (!is_valid_ether_addr(addr->sa_data))
2208		return -EADDRNOTAVAIL;
2209
2210	/* 82542 2.0 needs to be in reset to write receive address registers */
2211
2212	if (hw->mac_type == e1000_82542_rev2_0)
2213		e1000_enter_82542_rst(adapter);
2214
2215	eth_hw_addr_set(netdev, addr->sa_data);
2216	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2217
2218	e1000_rar_set(hw, hw->mac_addr, 0);
2219
2220	if (hw->mac_type == e1000_82542_rev2_0)
2221		e1000_leave_82542_rst(adapter);
2222
2223	return 0;
2224}
2225
2226/**
2227 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2228 * @netdev: network interface device structure
2229 *
2230 * The set_rx_mode entry point is called whenever the unicast or multicast
2231 * address lists or the network interface flags are updated. This routine is
2232 * responsible for configuring the hardware for proper unicast, multicast,
2233 * promiscuous mode, and all-multi behavior.
2234 **/
2235static void e1000_set_rx_mode(struct net_device *netdev)
2236{
2237	struct e1000_adapter *adapter = netdev_priv(netdev);
2238	struct e1000_hw *hw = &adapter->hw;
2239	struct netdev_hw_addr *ha;
2240	bool use_uc = false;
2241	u32 rctl;
2242	u32 hash_value;
2243	int i, rar_entries = E1000_RAR_ENTRIES;
2244	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2245	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2246
2247	if (!mcarray)
2248		return;
2249
2250	/* Check for Promiscuous and All Multicast modes */
2251
2252	rctl = er32(RCTL);
2253
2254	if (netdev->flags & IFF_PROMISC) {
2255		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2256		rctl &= ~E1000_RCTL_VFE;
2257	} else {
2258		if (netdev->flags & IFF_ALLMULTI)
2259			rctl |= E1000_RCTL_MPE;
2260		else
2261			rctl &= ~E1000_RCTL_MPE;
2262		/* Enable VLAN filter if there is a VLAN */
2263		if (e1000_vlan_used(adapter))
2264			rctl |= E1000_RCTL_VFE;
2265	}
2266
2267	if (netdev_uc_count(netdev) > rar_entries - 1) {
2268		rctl |= E1000_RCTL_UPE;
2269	} else if (!(netdev->flags & IFF_PROMISC)) {
2270		rctl &= ~E1000_RCTL_UPE;
2271		use_uc = true;
2272	}
2273
2274	ew32(RCTL, rctl);
2275
2276	/* 82542 2.0 needs to be in reset to write receive address registers */
2277
2278	if (hw->mac_type == e1000_82542_rev2_0)
2279		e1000_enter_82542_rst(adapter);
2280
2281	/* load the first 14 addresses into the exact filters 1-14. Unicast
2282	 * addresses take precedence to avoid disabling unicast filtering
2283	 * when possible.
2284	 *
2285	 * RAR 0 is used for the station MAC address
2286	 * if there are not 14 addresses, go ahead and clear the filters
2287	 */
2288	i = 1;
2289	if (use_uc)
2290		netdev_for_each_uc_addr(ha, netdev) {
2291			if (i == rar_entries)
2292				break;
2293			e1000_rar_set(hw, ha->addr, i++);
2294		}
2295
2296	netdev_for_each_mc_addr(ha, netdev) {
2297		if (i == rar_entries) {
2298			/* load any remaining addresses into the hash table */
2299			u32 hash_reg, hash_bit, mta;
2300			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2301			hash_reg = (hash_value >> 5) & 0x7F;
2302			hash_bit = hash_value & 0x1F;
2303			mta = (1 << hash_bit);
2304			mcarray[hash_reg] |= mta;
2305		} else {
2306			e1000_rar_set(hw, ha->addr, i++);
2307		}
2308	}
2309
2310	for (; i < rar_entries; i++) {
2311		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2312		E1000_WRITE_FLUSH();
2313		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2314		E1000_WRITE_FLUSH();
2315	}
2316
2317	/* write the hash table completely, write from bottom to avoid
2318	 * both stupid write combining chipsets, and flushing each write
2319	 */
2320	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2321		/* If we are on an 82544 has an errata where writing odd
2322		 * offsets overwrites the previous even offset, but writing
2323		 * backwards over the range solves the issue by always
2324		 * writing the odd offset first
2325		 */
2326		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2327	}
2328	E1000_WRITE_FLUSH();
2329
2330	if (hw->mac_type == e1000_82542_rev2_0)
2331		e1000_leave_82542_rst(adapter);
2332
2333	kfree(mcarray);
2334}
2335
2336/**
2337 * e1000_update_phy_info_task - get phy info
2338 * @work: work struct contained inside adapter struct
2339 *
2340 * Need to wait a few seconds after link up to get diagnostic information from
2341 * the phy
2342 */
2343static void e1000_update_phy_info_task(struct work_struct *work)
2344{
2345	struct e1000_adapter *adapter = container_of(work,
2346						     struct e1000_adapter,
2347						     phy_info_task.work);
2348
2349	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2350}
2351
2352/**
2353 * e1000_82547_tx_fifo_stall_task - task to complete work
2354 * @work: work struct contained inside adapter struct
2355 **/
2356static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2357{
2358	struct e1000_adapter *adapter = container_of(work,
2359						     struct e1000_adapter,
2360						     fifo_stall_task.work);
2361	struct e1000_hw *hw = &adapter->hw;
2362	struct net_device *netdev = adapter->netdev;
2363	u32 tctl;
2364
2365	if (atomic_read(&adapter->tx_fifo_stall)) {
2366		if ((er32(TDT) == er32(TDH)) &&
2367		   (er32(TDFT) == er32(TDFH)) &&
2368		   (er32(TDFTS) == er32(TDFHS))) {
2369			tctl = er32(TCTL);
2370			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2371			ew32(TDFT, adapter->tx_head_addr);
2372			ew32(TDFH, adapter->tx_head_addr);
2373			ew32(TDFTS, adapter->tx_head_addr);
2374			ew32(TDFHS, adapter->tx_head_addr);
2375			ew32(TCTL, tctl);
2376			E1000_WRITE_FLUSH();
2377
2378			adapter->tx_fifo_head = 0;
2379			atomic_set(&adapter->tx_fifo_stall, 0);
2380			netif_wake_queue(netdev);
2381		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2382			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2383		}
2384	}
2385}
2386
2387bool e1000_has_link(struct e1000_adapter *adapter)
2388{
2389	struct e1000_hw *hw = &adapter->hw;
2390	bool link_active = false;
2391
2392	/* get_link_status is set on LSC (link status) interrupt or rx
2393	 * sequence error interrupt (except on intel ce4100).
2394	 * get_link_status will stay false until the
2395	 * e1000_check_for_link establishes link for copper adapters
2396	 * ONLY
2397	 */
2398	switch (hw->media_type) {
2399	case e1000_media_type_copper:
2400		if (hw->mac_type == e1000_ce4100)
2401			hw->get_link_status = 1;
2402		if (hw->get_link_status) {
2403			e1000_check_for_link(hw);
2404			link_active = !hw->get_link_status;
2405		} else {
2406			link_active = true;
2407		}
2408		break;
2409	case e1000_media_type_fiber:
2410		e1000_check_for_link(hw);
2411		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2412		break;
2413	case e1000_media_type_internal_serdes:
2414		e1000_check_for_link(hw);
2415		link_active = hw->serdes_has_link;
2416		break;
2417	default:
2418		break;
2419	}
2420
2421	return link_active;
2422}
2423
2424/**
2425 * e1000_watchdog - work function
2426 * @work: work struct contained inside adapter struct
2427 **/
2428static void e1000_watchdog(struct work_struct *work)
2429{
2430	struct e1000_adapter *adapter = container_of(work,
2431						     struct e1000_adapter,
2432						     watchdog_task.work);
2433	struct e1000_hw *hw = &adapter->hw;
2434	struct net_device *netdev = adapter->netdev;
2435	struct e1000_tx_ring *txdr = adapter->tx_ring;
2436	u32 link, tctl;
2437
2438	link = e1000_has_link(adapter);
2439	if ((netif_carrier_ok(netdev)) && link)
2440		goto link_up;
2441
2442	if (link) {
2443		if (!netif_carrier_ok(netdev)) {
2444			u32 ctrl;
 
2445			/* update snapshot of PHY registers on LSC */
2446			e1000_get_speed_and_duplex(hw,
2447						   &adapter->link_speed,
2448						   &adapter->link_duplex);
2449
2450			ctrl = er32(CTRL);
2451			pr_info("%s NIC Link is Up %d Mbps %s, "
2452				"Flow Control: %s\n",
2453				netdev->name,
2454				adapter->link_speed,
2455				adapter->link_duplex == FULL_DUPLEX ?
2456				"Full Duplex" : "Half Duplex",
2457				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2458				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2459				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2460				E1000_CTRL_TFCE) ? "TX" : "None")));
2461
2462			/* adjust timeout factor according to speed/duplex */
2463			adapter->tx_timeout_factor = 1;
2464			switch (adapter->link_speed) {
2465			case SPEED_10:
 
2466				adapter->tx_timeout_factor = 16;
2467				break;
2468			case SPEED_100:
 
2469				/* maybe add some timeout factor ? */
2470				break;
2471			}
2472
2473			/* enable transmits in the hardware */
2474			tctl = er32(TCTL);
2475			tctl |= E1000_TCTL_EN;
2476			ew32(TCTL, tctl);
2477
2478			netif_carrier_on(netdev);
2479			if (!test_bit(__E1000_DOWN, &adapter->flags))
2480				schedule_delayed_work(&adapter->phy_info_task,
2481						      2 * HZ);
2482			adapter->smartspeed = 0;
2483		}
2484	} else {
2485		if (netif_carrier_ok(netdev)) {
2486			adapter->link_speed = 0;
2487			adapter->link_duplex = 0;
2488			pr_info("%s NIC Link is Down\n",
2489				netdev->name);
2490			netif_carrier_off(netdev);
2491
2492			if (!test_bit(__E1000_DOWN, &adapter->flags))
2493				schedule_delayed_work(&adapter->phy_info_task,
2494						      2 * HZ);
2495		}
2496
2497		e1000_smartspeed(adapter);
2498	}
2499
2500link_up:
2501	e1000_update_stats(adapter);
2502
2503	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2504	adapter->tpt_old = adapter->stats.tpt;
2505	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2506	adapter->colc_old = adapter->stats.colc;
2507
2508	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2509	adapter->gorcl_old = adapter->stats.gorcl;
2510	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2511	adapter->gotcl_old = adapter->stats.gotcl;
2512
2513	e1000_update_adaptive(hw);
2514
2515	if (!netif_carrier_ok(netdev)) {
2516		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2517			/* We've lost link, so the controller stops DMA,
2518			 * but we've got queued Tx work that's never going
2519			 * to get done, so reset controller to flush Tx.
2520			 * (Do the reset outside of interrupt context).
2521			 */
2522			adapter->tx_timeout_count++;
2523			schedule_work(&adapter->reset_task);
2524			/* exit immediately since reset is imminent */
2525			return;
2526		}
2527	}
2528
2529	/* Simple mode for Interrupt Throttle Rate (ITR) */
2530	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2531		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2532		 * Total asymmetrical Tx or Rx gets ITR=8000;
2533		 * everyone else is between 2000-8000.
2534		 */
2535		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2536		u32 dif = (adapter->gotcl > adapter->gorcl ?
2537			    adapter->gotcl - adapter->gorcl :
2538			    adapter->gorcl - adapter->gotcl) / 10000;
2539		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2540
2541		ew32(ITR, 1000000000 / (itr * 256));
2542	}
2543
2544	/* Cause software interrupt to ensure rx ring is cleaned */
2545	ew32(ICS, E1000_ICS_RXDMT0);
2546
2547	/* Force detection of hung controller every watchdog period */
2548	adapter->detect_tx_hung = true;
2549
2550	/* Reschedule the task */
2551	if (!test_bit(__E1000_DOWN, &adapter->flags))
2552		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2553}
2554
2555enum latency_range {
2556	lowest_latency = 0,
2557	low_latency = 1,
2558	bulk_latency = 2,
2559	latency_invalid = 255
2560};
2561
2562/**
2563 * e1000_update_itr - update the dynamic ITR value based on statistics
2564 * @adapter: pointer to adapter
2565 * @itr_setting: current adapter->itr
2566 * @packets: the number of packets during this measurement interval
2567 * @bytes: the number of bytes during this measurement interval
2568 *
2569 *      Stores a new ITR value based on packets and byte
2570 *      counts during the last interrupt.  The advantage of per interrupt
2571 *      computation is faster updates and more accurate ITR for the current
2572 *      traffic pattern.  Constants in this function were computed
2573 *      based on theoretical maximum wire speed and thresholds were set based
2574 *      on testing data as well as attempting to minimize response time
2575 *      while increasing bulk throughput.
2576 *      this functionality is controlled by the InterruptThrottleRate module
2577 *      parameter (see e1000_param.c)
2578 **/
2579static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2580				     u16 itr_setting, int packets, int bytes)
2581{
2582	unsigned int retval = itr_setting;
2583	struct e1000_hw *hw = &adapter->hw;
2584
2585	if (unlikely(hw->mac_type < e1000_82540))
2586		goto update_itr_done;
2587
2588	if (packets == 0)
2589		goto update_itr_done;
2590
2591	switch (itr_setting) {
2592	case lowest_latency:
2593		/* jumbo frames get bulk treatment*/
2594		if (bytes/packets > 8000)
2595			retval = bulk_latency;
2596		else if ((packets < 5) && (bytes > 512))
2597			retval = low_latency;
2598		break;
2599	case low_latency:  /* 50 usec aka 20000 ints/s */
2600		if (bytes > 10000) {
2601			/* jumbo frames need bulk latency setting */
2602			if (bytes/packets > 8000)
2603				retval = bulk_latency;
2604			else if ((packets < 10) || ((bytes/packets) > 1200))
2605				retval = bulk_latency;
2606			else if ((packets > 35))
2607				retval = lowest_latency;
2608		} else if (bytes/packets > 2000)
2609			retval = bulk_latency;
2610		else if (packets <= 2 && bytes < 512)
2611			retval = lowest_latency;
2612		break;
2613	case bulk_latency: /* 250 usec aka 4000 ints/s */
2614		if (bytes > 25000) {
2615			if (packets > 35)
2616				retval = low_latency;
2617		} else if (bytes < 6000) {
2618			retval = low_latency;
2619		}
2620		break;
2621	}
2622
2623update_itr_done:
2624	return retval;
2625}
2626
2627static void e1000_set_itr(struct e1000_adapter *adapter)
2628{
2629	struct e1000_hw *hw = &adapter->hw;
2630	u16 current_itr;
2631	u32 new_itr = adapter->itr;
2632
2633	if (unlikely(hw->mac_type < e1000_82540))
2634		return;
2635
2636	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2637	if (unlikely(adapter->link_speed != SPEED_1000)) {
 
2638		new_itr = 4000;
2639		goto set_itr_now;
2640	}
2641
2642	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2643					   adapter->total_tx_packets,
2644					   adapter->total_tx_bytes);
2645	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2646	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2647		adapter->tx_itr = low_latency;
2648
2649	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2650					   adapter->total_rx_packets,
2651					   adapter->total_rx_bytes);
2652	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2653	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2654		adapter->rx_itr = low_latency;
2655
2656	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2657
2658	switch (current_itr) {
2659	/* counts and packets in update_itr are dependent on these numbers */
2660	case lowest_latency:
2661		new_itr = 70000;
2662		break;
2663	case low_latency:
2664		new_itr = 20000; /* aka hwitr = ~200 */
2665		break;
2666	case bulk_latency:
2667		new_itr = 4000;
2668		break;
2669	default:
2670		break;
2671	}
2672
2673set_itr_now:
2674	if (new_itr != adapter->itr) {
2675		/* this attempts to bias the interrupt rate towards Bulk
2676		 * by adding intermediate steps when interrupt rate is
2677		 * increasing
2678		 */
2679		new_itr = new_itr > adapter->itr ?
2680			  min(adapter->itr + (new_itr >> 2), new_itr) :
2681			  new_itr;
2682		adapter->itr = new_itr;
2683		ew32(ITR, 1000000000 / (new_itr * 256));
2684	}
2685}
2686
2687#define E1000_TX_FLAGS_CSUM		0x00000001
2688#define E1000_TX_FLAGS_VLAN		0x00000002
2689#define E1000_TX_FLAGS_TSO		0x00000004
2690#define E1000_TX_FLAGS_IPV4		0x00000008
2691#define E1000_TX_FLAGS_NO_FCS		0x00000010
2692#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2693#define E1000_TX_FLAGS_VLAN_SHIFT	16
2694
2695static int e1000_tso(struct e1000_adapter *adapter,
2696		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2697		     __be16 protocol)
2698{
2699	struct e1000_context_desc *context_desc;
2700	struct e1000_tx_buffer *buffer_info;
2701	unsigned int i;
2702	u32 cmd_length = 0;
2703	u16 ipcse = 0, tucse, mss;
2704	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2705
2706	if (skb_is_gso(skb)) {
2707		int err;
2708
2709		err = skb_cow_head(skb, 0);
2710		if (err < 0)
2711			return err;
2712
2713		hdr_len = skb_tcp_all_headers(skb);
2714		mss = skb_shinfo(skb)->gso_size;
2715		if (protocol == htons(ETH_P_IP)) {
2716			struct iphdr *iph = ip_hdr(skb);
2717			iph->tot_len = 0;
2718			iph->check = 0;
2719			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2720								 iph->daddr, 0,
2721								 IPPROTO_TCP,
2722								 0);
2723			cmd_length = E1000_TXD_CMD_IP;
2724			ipcse = skb_transport_offset(skb) - 1;
2725		} else if (skb_is_gso_v6(skb)) {
2726			tcp_v6_gso_csum_prep(skb);
 
 
 
 
2727			ipcse = 0;
2728		}
2729		ipcss = skb_network_offset(skb);
2730		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2731		tucss = skb_transport_offset(skb);
2732		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2733		tucse = 0;
2734
2735		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2736			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2737
2738		i = tx_ring->next_to_use;
2739		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2740		buffer_info = &tx_ring->buffer_info[i];
2741
2742		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2743		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2744		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2745		context_desc->upper_setup.tcp_fields.tucss = tucss;
2746		context_desc->upper_setup.tcp_fields.tucso = tucso;
2747		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2748		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2749		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2750		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2751
2752		buffer_info->time_stamp = jiffies;
2753		buffer_info->next_to_watch = i;
2754
2755		if (++i == tx_ring->count)
2756			i = 0;
2757
2758		tx_ring->next_to_use = i;
2759
2760		return true;
2761	}
2762	return false;
2763}
2764
2765static bool e1000_tx_csum(struct e1000_adapter *adapter,
2766			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2767			  __be16 protocol)
2768{
2769	struct e1000_context_desc *context_desc;
2770	struct e1000_tx_buffer *buffer_info;
2771	unsigned int i;
2772	u8 css;
2773	u32 cmd_len = E1000_TXD_CMD_DEXT;
2774
2775	if (skb->ip_summed != CHECKSUM_PARTIAL)
2776		return false;
2777
2778	switch (protocol) {
2779	case cpu_to_be16(ETH_P_IP):
2780		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2781			cmd_len |= E1000_TXD_CMD_TCP;
2782		break;
2783	case cpu_to_be16(ETH_P_IPV6):
2784		/* XXX not handling all IPV6 headers */
2785		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2786			cmd_len |= E1000_TXD_CMD_TCP;
2787		break;
2788	default:
2789		if (unlikely(net_ratelimit()))
2790			e_warn(drv, "checksum_partial proto=%x!\n",
2791			       skb->protocol);
2792		break;
2793	}
2794
2795	css = skb_checksum_start_offset(skb);
2796
2797	i = tx_ring->next_to_use;
2798	buffer_info = &tx_ring->buffer_info[i];
2799	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2800
2801	context_desc->lower_setup.ip_config = 0;
2802	context_desc->upper_setup.tcp_fields.tucss = css;
2803	context_desc->upper_setup.tcp_fields.tucso =
2804		css + skb->csum_offset;
2805	context_desc->upper_setup.tcp_fields.tucse = 0;
2806	context_desc->tcp_seg_setup.data = 0;
2807	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2808
2809	buffer_info->time_stamp = jiffies;
2810	buffer_info->next_to_watch = i;
2811
2812	if (unlikely(++i == tx_ring->count))
2813		i = 0;
2814
2815	tx_ring->next_to_use = i;
2816
2817	return true;
2818}
2819
2820#define E1000_MAX_TXD_PWR	12
2821#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2822
2823static int e1000_tx_map(struct e1000_adapter *adapter,
2824			struct e1000_tx_ring *tx_ring,
2825			struct sk_buff *skb, unsigned int first,
2826			unsigned int max_per_txd, unsigned int nr_frags,
2827			unsigned int mss)
2828{
2829	struct e1000_hw *hw = &adapter->hw;
2830	struct pci_dev *pdev = adapter->pdev;
2831	struct e1000_tx_buffer *buffer_info;
2832	unsigned int len = skb_headlen(skb);
2833	unsigned int offset = 0, size, count = 0, i;
2834	unsigned int f, bytecount, segs;
2835
2836	i = tx_ring->next_to_use;
2837
2838	while (len) {
2839		buffer_info = &tx_ring->buffer_info[i];
2840		size = min(len, max_per_txd);
2841		/* Workaround for Controller erratum --
2842		 * descriptor for non-tso packet in a linear SKB that follows a
2843		 * tso gets written back prematurely before the data is fully
2844		 * DMA'd to the controller
2845		 */
2846		if (!skb->data_len && tx_ring->last_tx_tso &&
2847		    !skb_is_gso(skb)) {
2848			tx_ring->last_tx_tso = false;
2849			size -= 4;
2850		}
2851
2852		/* Workaround for premature desc write-backs
2853		 * in TSO mode.  Append 4-byte sentinel desc
2854		 */
2855		if (unlikely(mss && !nr_frags && size == len && size > 8))
2856			size -= 4;
2857		/* work-around for errata 10 and it applies
2858		 * to all controllers in PCI-X mode
2859		 * The fix is to make sure that the first descriptor of a
2860		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2861		 */
2862		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2863			     (size > 2015) && count == 0))
2864			size = 2015;
2865
2866		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2867		 * terminating buffers within evenly-aligned dwords.
2868		 */
2869		if (unlikely(adapter->pcix_82544 &&
2870		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2871		   size > 4))
2872			size -= 4;
2873
2874		buffer_info->length = size;
2875		/* set time_stamp *before* dma to help avoid a possible race */
2876		buffer_info->time_stamp = jiffies;
2877		buffer_info->mapped_as_page = false;
2878		buffer_info->dma = dma_map_single(&pdev->dev,
2879						  skb->data + offset,
2880						  size, DMA_TO_DEVICE);
2881		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2882			goto dma_error;
2883		buffer_info->next_to_watch = i;
2884
2885		len -= size;
2886		offset += size;
2887		count++;
2888		if (len) {
2889			i++;
2890			if (unlikely(i == tx_ring->count))
2891				i = 0;
2892		}
2893	}
2894
2895	for (f = 0; f < nr_frags; f++) {
2896		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2897
 
2898		len = skb_frag_size(frag);
2899		offset = 0;
2900
2901		while (len) {
2902			unsigned long bufend;
2903			i++;
2904			if (unlikely(i == tx_ring->count))
2905				i = 0;
2906
2907			buffer_info = &tx_ring->buffer_info[i];
2908			size = min(len, max_per_txd);
2909			/* Workaround for premature desc write-backs
2910			 * in TSO mode.  Append 4-byte sentinel desc
2911			 */
2912			if (unlikely(mss && f == (nr_frags-1) &&
2913			    size == len && size > 8))
2914				size -= 4;
2915			/* Workaround for potential 82544 hang in PCI-X.
2916			 * Avoid terminating buffers within evenly-aligned
2917			 * dwords.
2918			 */
2919			bufend = (unsigned long)
2920				page_to_phys(skb_frag_page(frag));
2921			bufend += offset + size - 1;
2922			if (unlikely(adapter->pcix_82544 &&
2923				     !(bufend & 4) &&
2924				     size > 4))
2925				size -= 4;
2926
2927			buffer_info->length = size;
2928			buffer_info->time_stamp = jiffies;
2929			buffer_info->mapped_as_page = true;
2930			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2931						offset, size, DMA_TO_DEVICE);
2932			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2933				goto dma_error;
2934			buffer_info->next_to_watch = i;
2935
2936			len -= size;
2937			offset += size;
2938			count++;
2939		}
2940	}
2941
2942	segs = skb_shinfo(skb)->gso_segs ?: 1;
2943	/* multiply data chunks by size of headers */
2944	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2945
2946	tx_ring->buffer_info[i].skb = skb;
2947	tx_ring->buffer_info[i].segs = segs;
2948	tx_ring->buffer_info[i].bytecount = bytecount;
2949	tx_ring->buffer_info[first].next_to_watch = i;
2950
2951	return count;
2952
2953dma_error:
2954	dev_err(&pdev->dev, "TX DMA map failed\n");
2955	buffer_info->dma = 0;
2956	if (count)
2957		count--;
2958
2959	while (count--) {
2960		if (i == 0)
2961			i += tx_ring->count;
2962		i--;
2963		buffer_info = &tx_ring->buffer_info[i];
2964		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2965	}
2966
2967	return 0;
2968}
2969
2970static void e1000_tx_queue(struct e1000_adapter *adapter,
2971			   struct e1000_tx_ring *tx_ring, int tx_flags,
2972			   int count)
2973{
2974	struct e1000_tx_desc *tx_desc = NULL;
2975	struct e1000_tx_buffer *buffer_info;
2976	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2977	unsigned int i;
2978
2979	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2980		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2981			     E1000_TXD_CMD_TSE;
2982		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2983
2984		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2985			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2986	}
2987
2988	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2989		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2990		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2991	}
2992
2993	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2994		txd_lower |= E1000_TXD_CMD_VLE;
2995		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2996	}
2997
2998	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2999		txd_lower &= ~(E1000_TXD_CMD_IFCS);
3000
3001	i = tx_ring->next_to_use;
3002
3003	while (count--) {
3004		buffer_info = &tx_ring->buffer_info[i];
3005		tx_desc = E1000_TX_DESC(*tx_ring, i);
3006		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3007		tx_desc->lower.data =
3008			cpu_to_le32(txd_lower | buffer_info->length);
3009		tx_desc->upper.data = cpu_to_le32(txd_upper);
3010		if (unlikely(++i == tx_ring->count))
3011			i = 0;
3012	}
3013
3014	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3015
3016	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3017	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3018		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3019
3020	/* Force memory writes to complete before letting h/w
3021	 * know there are new descriptors to fetch.  (Only
3022	 * applicable for weak-ordered memory model archs,
3023	 * such as IA-64).
3024	 */
3025	dma_wmb();
3026
3027	tx_ring->next_to_use = i;
3028}
3029
3030/* 82547 workaround to avoid controller hang in half-duplex environment.
3031 * The workaround is to avoid queuing a large packet that would span
3032 * the internal Tx FIFO ring boundary by notifying the stack to resend
3033 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3034 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3035 * to the beginning of the Tx FIFO.
3036 */
3037
3038#define E1000_FIFO_HDR			0x10
3039#define E1000_82547_PAD_LEN		0x3E0
3040
3041static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3042				       struct sk_buff *skb)
3043{
3044	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3045	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3046
3047	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3048
3049	if (adapter->link_duplex != HALF_DUPLEX)
3050		goto no_fifo_stall_required;
3051
3052	if (atomic_read(&adapter->tx_fifo_stall))
3053		return 1;
3054
3055	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3056		atomic_set(&adapter->tx_fifo_stall, 1);
3057		return 1;
3058	}
3059
3060no_fifo_stall_required:
3061	adapter->tx_fifo_head += skb_fifo_len;
3062	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3063		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3064	return 0;
3065}
3066
3067static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3068{
3069	struct e1000_adapter *adapter = netdev_priv(netdev);
3070	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3071
3072	netif_stop_queue(netdev);
3073	/* Herbert's original patch had:
3074	 *  smp_mb__after_netif_stop_queue();
3075	 * but since that doesn't exist yet, just open code it.
3076	 */
3077	smp_mb();
3078
3079	/* We need to check again in a case another CPU has just
3080	 * made room available.
3081	 */
3082	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3083		return -EBUSY;
3084
3085	/* A reprieve! */
3086	netif_start_queue(netdev);
3087	++adapter->restart_queue;
3088	return 0;
3089}
3090
3091static int e1000_maybe_stop_tx(struct net_device *netdev,
3092			       struct e1000_tx_ring *tx_ring, int size)
3093{
3094	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3095		return 0;
3096	return __e1000_maybe_stop_tx(netdev, size);
3097}
3098
3099#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3100static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3101				    struct net_device *netdev)
3102{
3103	struct e1000_adapter *adapter = netdev_priv(netdev);
3104	struct e1000_hw *hw = &adapter->hw;
3105	struct e1000_tx_ring *tx_ring;
3106	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3107	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3108	unsigned int tx_flags = 0;
3109	unsigned int len = skb_headlen(skb);
3110	unsigned int nr_frags;
3111	unsigned int mss;
3112	int count = 0;
3113	int tso;
3114	unsigned int f;
3115	__be16 protocol = vlan_get_protocol(skb);
3116
3117	/* This goes back to the question of how to logically map a Tx queue
3118	 * to a flow.  Right now, performance is impacted slightly negatively
3119	 * if using multiple Tx queues.  If the stack breaks away from a
3120	 * single qdisc implementation, we can look at this again.
3121	 */
3122	tx_ring = adapter->tx_ring;
3123
3124	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3125	 * packets may get corrupted during padding by HW.
3126	 * To WA this issue, pad all small packets manually.
3127	 */
3128	if (eth_skb_pad(skb))
3129		return NETDEV_TX_OK;
3130
3131	mss = skb_shinfo(skb)->gso_size;
3132	/* The controller does a simple calculation to
3133	 * make sure there is enough room in the FIFO before
3134	 * initiating the DMA for each buffer.  The calc is:
3135	 * 4 = ceil(buffer len/mss).  To make sure we don't
3136	 * overrun the FIFO, adjust the max buffer len if mss
3137	 * drops.
3138	 */
3139	if (mss) {
3140		u8 hdr_len;
3141		max_per_txd = min(mss << 2, max_per_txd);
3142		max_txd_pwr = fls(max_per_txd) - 1;
3143
3144		hdr_len = skb_tcp_all_headers(skb);
3145		if (skb->data_len && hdr_len == len) {
3146			switch (hw->mac_type) {
3147			case e1000_82544: {
3148				unsigned int pull_size;
3149
3150				/* Make sure we have room to chop off 4 bytes,
3151				 * and that the end alignment will work out to
3152				 * this hardware's requirements
3153				 * NOTE: this is a TSO only workaround
3154				 * if end byte alignment not correct move us
3155				 * into the next dword
3156				 */
3157				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3158				    & 4)
3159					break;
 
3160				pull_size = min((unsigned int)4, skb->data_len);
3161				if (!__pskb_pull_tail(skb, pull_size)) {
3162					e_err(drv, "__pskb_pull_tail "
3163					      "failed.\n");
3164					dev_kfree_skb_any(skb);
3165					return NETDEV_TX_OK;
3166				}
3167				len = skb_headlen(skb);
3168				break;
3169			}
3170			default:
3171				/* do nothing */
3172				break;
3173			}
3174		}
3175	}
3176
3177	/* reserve a descriptor for the offload context */
3178	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3179		count++;
3180	count++;
3181
3182	/* Controller Erratum workaround */
3183	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3184		count++;
3185
3186	count += TXD_USE_COUNT(len, max_txd_pwr);
3187
3188	if (adapter->pcix_82544)
3189		count++;
3190
3191	/* work-around for errata 10 and it applies to all controllers
3192	 * in PCI-X mode, so add one more descriptor to the count
3193	 */
3194	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3195			(len > 2015)))
3196		count++;
3197
3198	nr_frags = skb_shinfo(skb)->nr_frags;
3199	for (f = 0; f < nr_frags; f++)
3200		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3201				       max_txd_pwr);
3202	if (adapter->pcix_82544)
3203		count += nr_frags;
3204
3205	/* need: count + 2 desc gap to keep tail from touching
3206	 * head, otherwise try next time
3207	 */
3208	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3209		return NETDEV_TX_BUSY;
3210
3211	if (unlikely((hw->mac_type == e1000_82547) &&
3212		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3213		netif_stop_queue(netdev);
3214		if (!test_bit(__E1000_DOWN, &adapter->flags))
3215			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3216		return NETDEV_TX_BUSY;
3217	}
3218
3219	if (skb_vlan_tag_present(skb)) {
3220		tx_flags |= E1000_TX_FLAGS_VLAN;
3221		tx_flags |= (skb_vlan_tag_get(skb) <<
3222			     E1000_TX_FLAGS_VLAN_SHIFT);
3223	}
3224
3225	first = tx_ring->next_to_use;
3226
3227	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3228	if (tso < 0) {
3229		dev_kfree_skb_any(skb);
3230		return NETDEV_TX_OK;
3231	}
3232
3233	if (likely(tso)) {
3234		if (likely(hw->mac_type != e1000_82544))
3235			tx_ring->last_tx_tso = true;
3236		tx_flags |= E1000_TX_FLAGS_TSO;
3237	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3238		tx_flags |= E1000_TX_FLAGS_CSUM;
3239
3240	if (protocol == htons(ETH_P_IP))
3241		tx_flags |= E1000_TX_FLAGS_IPV4;
3242
3243	if (unlikely(skb->no_fcs))
3244		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3245
3246	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3247			     nr_frags, mss);
3248
3249	if (count) {
3250		/* The descriptors needed is higher than other Intel drivers
3251		 * due to a number of workarounds.  The breakdown is below:
3252		 * Data descriptors: MAX_SKB_FRAGS + 1
3253		 * Context Descriptor: 1
3254		 * Keep head from touching tail: 2
3255		 * Workarounds: 3
3256		 */
3257		int desc_needed = MAX_SKB_FRAGS + 7;
3258
3259		netdev_sent_queue(netdev, skb->len);
3260		skb_tx_timestamp(skb);
3261
3262		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3263
3264		/* 82544 potentially requires twice as many data descriptors
3265		 * in order to guarantee buffers don't end on evenly-aligned
3266		 * dwords
3267		 */
3268		if (adapter->pcix_82544)
3269			desc_needed += MAX_SKB_FRAGS + 1;
3270
3271		/* Make sure there is space in the ring for the next send. */
3272		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3273
3274		if (!netdev_xmit_more() ||
3275		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3276			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
 
 
 
 
 
3277		}
3278	} else {
3279		dev_kfree_skb_any(skb);
3280		tx_ring->buffer_info[first].time_stamp = 0;
3281		tx_ring->next_to_use = first;
3282	}
3283
3284	return NETDEV_TX_OK;
3285}
3286
3287#define NUM_REGS 38 /* 1 based count */
3288static void e1000_regdump(struct e1000_adapter *adapter)
3289{
3290	struct e1000_hw *hw = &adapter->hw;
3291	u32 regs[NUM_REGS];
3292	u32 *regs_buff = regs;
3293	int i = 0;
3294
3295	static const char * const reg_name[] = {
3296		"CTRL",  "STATUS",
3297		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3298		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3299		"TIDV", "TXDCTL", "TADV", "TARC0",
3300		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3301		"TXDCTL1", "TARC1",
3302		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3303		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3304		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3305	};
3306
3307	regs_buff[0]  = er32(CTRL);
3308	regs_buff[1]  = er32(STATUS);
3309
3310	regs_buff[2]  = er32(RCTL);
3311	regs_buff[3]  = er32(RDLEN);
3312	regs_buff[4]  = er32(RDH);
3313	regs_buff[5]  = er32(RDT);
3314	regs_buff[6]  = er32(RDTR);
3315
3316	regs_buff[7]  = er32(TCTL);
3317	regs_buff[8]  = er32(TDBAL);
3318	regs_buff[9]  = er32(TDBAH);
3319	regs_buff[10] = er32(TDLEN);
3320	regs_buff[11] = er32(TDH);
3321	regs_buff[12] = er32(TDT);
3322	regs_buff[13] = er32(TIDV);
3323	regs_buff[14] = er32(TXDCTL);
3324	regs_buff[15] = er32(TADV);
3325	regs_buff[16] = er32(TARC0);
3326
3327	regs_buff[17] = er32(TDBAL1);
3328	regs_buff[18] = er32(TDBAH1);
3329	regs_buff[19] = er32(TDLEN1);
3330	regs_buff[20] = er32(TDH1);
3331	regs_buff[21] = er32(TDT1);
3332	regs_buff[22] = er32(TXDCTL1);
3333	regs_buff[23] = er32(TARC1);
3334	regs_buff[24] = er32(CTRL_EXT);
3335	regs_buff[25] = er32(ERT);
3336	regs_buff[26] = er32(RDBAL0);
3337	regs_buff[27] = er32(RDBAH0);
3338	regs_buff[28] = er32(TDFH);
3339	regs_buff[29] = er32(TDFT);
3340	regs_buff[30] = er32(TDFHS);
3341	regs_buff[31] = er32(TDFTS);
3342	regs_buff[32] = er32(TDFPC);
3343	regs_buff[33] = er32(RDFH);
3344	regs_buff[34] = er32(RDFT);
3345	regs_buff[35] = er32(RDFHS);
3346	regs_buff[36] = er32(RDFTS);
3347	regs_buff[37] = er32(RDFPC);
3348
3349	pr_info("Register dump\n");
3350	for (i = 0; i < NUM_REGS; i++)
3351		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3352}
3353
3354/*
3355 * e1000_dump: Print registers, tx ring and rx ring
3356 */
3357static void e1000_dump(struct e1000_adapter *adapter)
3358{
3359	/* this code doesn't handle multiple rings */
3360	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3361	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3362	int i;
3363
3364	if (!netif_msg_hw(adapter))
3365		return;
3366
3367	/* Print Registers */
3368	e1000_regdump(adapter);
3369
3370	/* transmit dump */
3371	pr_info("TX Desc ring0 dump\n");
3372
3373	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3374	 *
3375	 * Legacy Transmit Descriptor
3376	 *   +--------------------------------------------------------------+
3377	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3378	 *   +--------------------------------------------------------------+
3379	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3380	 *   +--------------------------------------------------------------+
3381	 *   63       48 47        36 35    32 31     24 23    16 15        0
3382	 *
3383	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3384	 *   63      48 47    40 39       32 31             16 15    8 7      0
3385	 *   +----------------------------------------------------------------+
3386	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3387	 *   +----------------------------------------------------------------+
3388	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3389	 *   +----------------------------------------------------------------+
3390	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3391	 *
3392	 * Extended Data Descriptor (DTYP=0x1)
3393	 *   +----------------------------------------------------------------+
3394	 * 0 |                     Buffer Address [63:0]                      |
3395	 *   +----------------------------------------------------------------+
3396	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3397	 *   +----------------------------------------------------------------+
3398	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3399	 */
3400	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3401	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3402
3403	if (!netif_msg_tx_done(adapter))
3404		goto rx_ring_summary;
3405
3406	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3407		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3408		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3409		struct my_u { __le64 a; __le64 b; };
3410		struct my_u *u = (struct my_u *)tx_desc;
3411		const char *type;
3412
3413		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3414			type = "NTC/U";
3415		else if (i == tx_ring->next_to_use)
3416			type = "NTU";
3417		else if (i == tx_ring->next_to_clean)
3418			type = "NTC";
3419		else
3420			type = "";
3421
3422		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3423			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3424			le64_to_cpu(u->a), le64_to_cpu(u->b),
3425			(u64)buffer_info->dma, buffer_info->length,
3426			buffer_info->next_to_watch,
3427			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3428	}
3429
3430rx_ring_summary:
3431	/* receive dump */
3432	pr_info("\nRX Desc ring dump\n");
3433
3434	/* Legacy Receive Descriptor Format
3435	 *
3436	 * +-----------------------------------------------------+
3437	 * |                Buffer Address [63:0]                |
3438	 * +-----------------------------------------------------+
3439	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3440	 * +-----------------------------------------------------+
3441	 * 63       48 47    40 39      32 31         16 15      0
3442	 */
3443	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3444
3445	if (!netif_msg_rx_status(adapter))
3446		goto exit;
3447
3448	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3449		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3450		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3451		struct my_u { __le64 a; __le64 b; };
3452		struct my_u *u = (struct my_u *)rx_desc;
3453		const char *type;
3454
3455		if (i == rx_ring->next_to_use)
3456			type = "NTU";
3457		else if (i == rx_ring->next_to_clean)
3458			type = "NTC";
3459		else
3460			type = "";
3461
3462		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3463			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3464			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3465	} /* for */
3466
3467	/* dump the descriptor caches */
3468	/* rx */
3469	pr_info("Rx descriptor cache in 64bit format\n");
3470	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3471		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3472			i,
3473			readl(adapter->hw.hw_addr + i+4),
3474			readl(adapter->hw.hw_addr + i),
3475			readl(adapter->hw.hw_addr + i+12),
3476			readl(adapter->hw.hw_addr + i+8));
3477	}
3478	/* tx */
3479	pr_info("Tx descriptor cache in 64bit format\n");
3480	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3481		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3482			i,
3483			readl(adapter->hw.hw_addr + i+4),
3484			readl(adapter->hw.hw_addr + i),
3485			readl(adapter->hw.hw_addr + i+12),
3486			readl(adapter->hw.hw_addr + i+8));
3487	}
3488exit:
3489	return;
3490}
3491
3492/**
3493 * e1000_tx_timeout - Respond to a Tx Hang
3494 * @netdev: network interface device structure
3495 * @txqueue: number of the Tx queue that hung (unused)
3496 **/
3497static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3498{
3499	struct e1000_adapter *adapter = netdev_priv(netdev);
3500
3501	/* Do the reset outside of interrupt context */
3502	adapter->tx_timeout_count++;
3503	schedule_work(&adapter->reset_task);
3504}
3505
3506static void e1000_reset_task(struct work_struct *work)
3507{
3508	struct e1000_adapter *adapter =
3509		container_of(work, struct e1000_adapter, reset_task);
3510
3511	e_err(drv, "Reset adapter\n");
3512	rtnl_lock();
3513	e1000_reinit_locked(adapter);
3514	rtnl_unlock();
3515}
3516
3517/**
3518 * e1000_change_mtu - Change the Maximum Transfer Unit
3519 * @netdev: network interface device structure
3520 * @new_mtu: new value for maximum frame size
3521 *
3522 * Returns 0 on success, negative on failure
3523 **/
3524static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3525{
3526	struct e1000_adapter *adapter = netdev_priv(netdev);
3527	struct e1000_hw *hw = &adapter->hw;
3528	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3529
3530	/* Adapter-specific max frame size limits. */
3531	switch (hw->mac_type) {
3532	case e1000_undefined ... e1000_82542_rev2_1:
3533		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3534			e_err(probe, "Jumbo Frames not supported.\n");
3535			return -EINVAL;
3536		}
3537		break;
3538	default:
3539		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3540		break;
3541	}
3542
3543	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3544		msleep(1);
3545	/* e1000_down has a dependency on max_frame_size */
3546	hw->max_frame_size = max_frame;
3547	if (netif_running(netdev)) {
3548		/* prevent buffers from being reallocated */
3549		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3550		e1000_down(adapter);
3551	}
3552
3553	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3554	 * means we reserve 2 more, this pushes us to allocate from the next
3555	 * larger slab size.
3556	 * i.e. RXBUFFER_2048 --> size-4096 slab
3557	 * however with the new *_jumbo_rx* routines, jumbo receives will use
3558	 * fragmented skbs
3559	 */
3560
3561	if (max_frame <= E1000_RXBUFFER_2048)
3562		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3563	else
3564#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3565		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3566#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3567		adapter->rx_buffer_len = PAGE_SIZE;
3568#endif
3569
3570	/* adjust allocation if LPE protects us, and we aren't using SBP */
3571	if (!hw->tbi_compatibility_on &&
3572	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3573	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3574		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3575
3576	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3577		   netdev->mtu, new_mtu);
3578	WRITE_ONCE(netdev->mtu, new_mtu);
3579
3580	if (netif_running(netdev))
3581		e1000_up(adapter);
3582	else
3583		e1000_reset(adapter);
3584
3585	clear_bit(__E1000_RESETTING, &adapter->flags);
3586
3587	return 0;
3588}
3589
3590/**
3591 * e1000_update_stats - Update the board statistics counters
3592 * @adapter: board private structure
3593 **/
3594void e1000_update_stats(struct e1000_adapter *adapter)
3595{
3596	struct net_device *netdev = adapter->netdev;
3597	struct e1000_hw *hw = &adapter->hw;
3598	struct pci_dev *pdev = adapter->pdev;
3599	unsigned long flags;
3600	u16 phy_tmp;
3601
3602#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3603
3604	/* Prevent stats update while adapter is being reset, or if the pci
3605	 * connection is down.
3606	 */
3607	if (adapter->link_speed == 0)
3608		return;
3609	if (pci_channel_offline(pdev))
3610		return;
3611
3612	spin_lock_irqsave(&adapter->stats_lock, flags);
3613
3614	/* these counters are modified from e1000_tbi_adjust_stats,
3615	 * called from the interrupt context, so they must only
3616	 * be written while holding adapter->stats_lock
3617	 */
3618
3619	adapter->stats.crcerrs += er32(CRCERRS);
3620	adapter->stats.gprc += er32(GPRC);
3621	adapter->stats.gorcl += er32(GORCL);
3622	adapter->stats.gorch += er32(GORCH);
3623	adapter->stats.bprc += er32(BPRC);
3624	adapter->stats.mprc += er32(MPRC);
3625	adapter->stats.roc += er32(ROC);
3626
3627	adapter->stats.prc64 += er32(PRC64);
3628	adapter->stats.prc127 += er32(PRC127);
3629	adapter->stats.prc255 += er32(PRC255);
3630	adapter->stats.prc511 += er32(PRC511);
3631	adapter->stats.prc1023 += er32(PRC1023);
3632	adapter->stats.prc1522 += er32(PRC1522);
3633
3634	adapter->stats.symerrs += er32(SYMERRS);
3635	adapter->stats.mpc += er32(MPC);
3636	adapter->stats.scc += er32(SCC);
3637	adapter->stats.ecol += er32(ECOL);
3638	adapter->stats.mcc += er32(MCC);
3639	adapter->stats.latecol += er32(LATECOL);
3640	adapter->stats.dc += er32(DC);
3641	adapter->stats.sec += er32(SEC);
3642	adapter->stats.rlec += er32(RLEC);
3643	adapter->stats.xonrxc += er32(XONRXC);
3644	adapter->stats.xontxc += er32(XONTXC);
3645	adapter->stats.xoffrxc += er32(XOFFRXC);
3646	adapter->stats.xofftxc += er32(XOFFTXC);
3647	adapter->stats.fcruc += er32(FCRUC);
3648	adapter->stats.gptc += er32(GPTC);
3649	adapter->stats.gotcl += er32(GOTCL);
3650	adapter->stats.gotch += er32(GOTCH);
3651	adapter->stats.rnbc += er32(RNBC);
3652	adapter->stats.ruc += er32(RUC);
3653	adapter->stats.rfc += er32(RFC);
3654	adapter->stats.rjc += er32(RJC);
3655	adapter->stats.torl += er32(TORL);
3656	adapter->stats.torh += er32(TORH);
3657	adapter->stats.totl += er32(TOTL);
3658	adapter->stats.toth += er32(TOTH);
3659	adapter->stats.tpr += er32(TPR);
3660
3661	adapter->stats.ptc64 += er32(PTC64);
3662	adapter->stats.ptc127 += er32(PTC127);
3663	adapter->stats.ptc255 += er32(PTC255);
3664	adapter->stats.ptc511 += er32(PTC511);
3665	adapter->stats.ptc1023 += er32(PTC1023);
3666	adapter->stats.ptc1522 += er32(PTC1522);
3667
3668	adapter->stats.mptc += er32(MPTC);
3669	adapter->stats.bptc += er32(BPTC);
3670
3671	/* used for adaptive IFS */
3672
3673	hw->tx_packet_delta = er32(TPT);
3674	adapter->stats.tpt += hw->tx_packet_delta;
3675	hw->collision_delta = er32(COLC);
3676	adapter->stats.colc += hw->collision_delta;
3677
3678	if (hw->mac_type >= e1000_82543) {
3679		adapter->stats.algnerrc += er32(ALGNERRC);
3680		adapter->stats.rxerrc += er32(RXERRC);
3681		adapter->stats.tncrs += er32(TNCRS);
3682		adapter->stats.cexterr += er32(CEXTERR);
3683		adapter->stats.tsctc += er32(TSCTC);
3684		adapter->stats.tsctfc += er32(TSCTFC);
3685	}
3686
3687	/* Fill out the OS statistics structure */
3688	netdev->stats.multicast = adapter->stats.mprc;
3689	netdev->stats.collisions = adapter->stats.colc;
3690
3691	/* Rx Errors */
3692
3693	/* RLEC on some newer hardware can be incorrect so build
3694	 * our own version based on RUC and ROC
3695	 */
3696	netdev->stats.rx_errors = adapter->stats.rxerrc +
3697		adapter->stats.crcerrs + adapter->stats.algnerrc +
3698		adapter->stats.ruc + adapter->stats.roc +
3699		adapter->stats.cexterr;
3700	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3701	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3702	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3703	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3704	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3705
3706	/* Tx Errors */
3707	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3708	netdev->stats.tx_errors = adapter->stats.txerrc;
3709	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3710	netdev->stats.tx_window_errors = adapter->stats.latecol;
3711	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3712	if (hw->bad_tx_carr_stats_fd &&
3713	    adapter->link_duplex == FULL_DUPLEX) {
3714		netdev->stats.tx_carrier_errors = 0;
3715		adapter->stats.tncrs = 0;
3716	}
3717
3718	/* Tx Dropped needs to be maintained elsewhere */
3719
3720	/* Phy Stats */
3721	if (hw->media_type == e1000_media_type_copper) {
3722		if ((adapter->link_speed == SPEED_1000) &&
3723		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3724			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3725			adapter->phy_stats.idle_errors += phy_tmp;
3726		}
3727
3728		if ((hw->mac_type <= e1000_82546) &&
3729		   (hw->phy_type == e1000_phy_m88) &&
3730		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3731			adapter->phy_stats.receive_errors += phy_tmp;
3732	}
3733
3734	/* Management Stats */
3735	if (hw->has_smbus) {
3736		adapter->stats.mgptc += er32(MGTPTC);
3737		adapter->stats.mgprc += er32(MGTPRC);
3738		adapter->stats.mgpdc += er32(MGTPDC);
3739	}
3740
3741	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3742}
3743
3744/**
3745 * e1000_intr - Interrupt Handler
3746 * @irq: interrupt number
3747 * @data: pointer to a network interface device structure
3748 **/
3749static irqreturn_t e1000_intr(int irq, void *data)
3750{
3751	struct net_device *netdev = data;
3752	struct e1000_adapter *adapter = netdev_priv(netdev);
3753	struct e1000_hw *hw = &adapter->hw;
3754	u32 icr = er32(ICR);
3755
3756	if (unlikely((!icr)))
3757		return IRQ_NONE;  /* Not our interrupt */
3758
3759	/* we might have caused the interrupt, but the above
3760	 * read cleared it, and just in case the driver is
3761	 * down there is nothing to do so return handled
3762	 */
3763	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3764		return IRQ_HANDLED;
3765
3766	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3767		hw->get_link_status = 1;
3768		/* guard against interrupt when we're going down */
3769		if (!test_bit(__E1000_DOWN, &adapter->flags))
3770			schedule_delayed_work(&adapter->watchdog_task, 1);
3771	}
3772
3773	/* disable interrupts, without the synchronize_irq bit */
3774	ew32(IMC, ~0);
3775	E1000_WRITE_FLUSH();
3776
3777	if (likely(napi_schedule_prep(&adapter->napi))) {
3778		adapter->total_tx_bytes = 0;
3779		adapter->total_tx_packets = 0;
3780		adapter->total_rx_bytes = 0;
3781		adapter->total_rx_packets = 0;
3782		__napi_schedule(&adapter->napi);
3783	} else {
3784		/* this really should not happen! if it does it is basically a
3785		 * bug, but not a hard error, so enable ints and continue
3786		 */
3787		if (!test_bit(__E1000_DOWN, &adapter->flags))
3788			e1000_irq_enable(adapter);
3789	}
3790
3791	return IRQ_HANDLED;
3792}
3793
3794/**
3795 * e1000_clean - NAPI Rx polling callback
3796 * @napi: napi struct containing references to driver info
3797 * @budget: budget given to driver for receive packets
3798 **/
3799static int e1000_clean(struct napi_struct *napi, int budget)
3800{
3801	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3802						     napi);
3803	int tx_clean_complete = 0, work_done = 0;
3804
3805	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3806
3807	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3808
3809	if (!tx_clean_complete || work_done == budget)
3810		return budget;
3811
3812	/* Exit the polling mode, but don't re-enable interrupts if stack might
3813	 * poll us due to busy-polling
3814	 */
3815	if (likely(napi_complete_done(napi, work_done))) {
3816		if (likely(adapter->itr_setting & 3))
3817			e1000_set_itr(adapter);
 
3818		if (!test_bit(__E1000_DOWN, &adapter->flags))
3819			e1000_irq_enable(adapter);
3820	}
3821
3822	return work_done;
3823}
3824
3825/**
3826 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3827 * @adapter: board private structure
3828 * @tx_ring: ring to clean
3829 **/
3830static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3831			       struct e1000_tx_ring *tx_ring)
3832{
3833	struct e1000_hw *hw = &adapter->hw;
3834	struct net_device *netdev = adapter->netdev;
3835	struct e1000_tx_desc *tx_desc, *eop_desc;
3836	struct e1000_tx_buffer *buffer_info;
3837	unsigned int i, eop;
3838	unsigned int count = 0;
3839	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3840	unsigned int bytes_compl = 0, pkts_compl = 0;
3841
3842	i = tx_ring->next_to_clean;
3843	eop = tx_ring->buffer_info[i].next_to_watch;
3844	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3845
3846	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3847	       (count < tx_ring->count)) {
3848		bool cleaned = false;
3849		dma_rmb();	/* read buffer_info after eop_desc */
3850		for ( ; !cleaned; count++) {
3851			tx_desc = E1000_TX_DESC(*tx_ring, i);
3852			buffer_info = &tx_ring->buffer_info[i];
3853			cleaned = (i == eop);
3854
3855			if (cleaned) {
3856				total_tx_packets += buffer_info->segs;
3857				total_tx_bytes += buffer_info->bytecount;
3858				if (buffer_info->skb) {
3859					bytes_compl += buffer_info->skb->len;
3860					pkts_compl++;
3861				}
3862
3863			}
3864			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3865							 64);
3866			tx_desc->upper.data = 0;
3867
3868			if (unlikely(++i == tx_ring->count))
3869				i = 0;
3870		}
3871
3872		eop = tx_ring->buffer_info[i].next_to_watch;
3873		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3874	}
3875
3876	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3877	 * which will reuse the cleaned buffers.
3878	 */
3879	smp_store_release(&tx_ring->next_to_clean, i);
3880
3881	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3882
3883#define TX_WAKE_THRESHOLD 32
3884	if (unlikely(count && netif_carrier_ok(netdev) &&
3885		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3886		/* Make sure that anybody stopping the queue after this
3887		 * sees the new next_to_clean.
3888		 */
3889		smp_mb();
3890
3891		if (netif_queue_stopped(netdev) &&
3892		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3893			netif_wake_queue(netdev);
3894			++adapter->restart_queue;
3895		}
3896	}
3897
3898	if (adapter->detect_tx_hung) {
3899		/* Detect a transmit hang in hardware, this serializes the
3900		 * check with the clearing of time_stamp and movement of i
3901		 */
3902		adapter->detect_tx_hung = false;
3903		if (tx_ring->buffer_info[eop].time_stamp &&
3904		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3905			       (adapter->tx_timeout_factor * HZ)) &&
3906		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3907
3908			/* detected Tx unit hang */
3909			e_err(drv, "Detected Tx Unit Hang\n"
3910			      "  Tx Queue             <%lu>\n"
3911			      "  TDH                  <%x>\n"
3912			      "  TDT                  <%x>\n"
3913			      "  next_to_use          <%x>\n"
3914			      "  next_to_clean        <%x>\n"
3915			      "buffer_info[next_to_clean]\n"
3916			      "  time_stamp           <%lx>\n"
3917			      "  next_to_watch        <%x>\n"
3918			      "  jiffies              <%lx>\n"
3919			      "  next_to_watch.status <%x>\n",
3920				(unsigned long)(tx_ring - adapter->tx_ring),
3921				readl(hw->hw_addr + tx_ring->tdh),
3922				readl(hw->hw_addr + tx_ring->tdt),
3923				tx_ring->next_to_use,
3924				tx_ring->next_to_clean,
3925				tx_ring->buffer_info[eop].time_stamp,
3926				eop,
3927				jiffies,
3928				eop_desc->upper.fields.status);
3929			e1000_dump(adapter);
3930			netif_stop_queue(netdev);
3931		}
3932	}
3933	adapter->total_tx_bytes += total_tx_bytes;
3934	adapter->total_tx_packets += total_tx_packets;
3935	netdev->stats.tx_bytes += total_tx_bytes;
3936	netdev->stats.tx_packets += total_tx_packets;
3937	return count < tx_ring->count;
3938}
3939
3940/**
3941 * e1000_rx_checksum - Receive Checksum Offload for 82543
3942 * @adapter:     board private structure
3943 * @status_err:  receive descriptor status and error fields
3944 * @csum:        receive descriptor csum field
3945 * @skb:         socket buffer with received data
3946 **/
3947static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3948			      u32 csum, struct sk_buff *skb)
3949{
3950	struct e1000_hw *hw = &adapter->hw;
3951	u16 status = (u16)status_err;
3952	u8 errors = (u8)(status_err >> 24);
3953
3954	skb_checksum_none_assert(skb);
3955
3956	/* 82543 or newer only */
3957	if (unlikely(hw->mac_type < e1000_82543))
3958		return;
3959	/* Ignore Checksum bit is set */
3960	if (unlikely(status & E1000_RXD_STAT_IXSM))
3961		return;
3962	/* TCP/UDP checksum error bit is set */
3963	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3964		/* let the stack verify checksum errors */
3965		adapter->hw_csum_err++;
3966		return;
3967	}
3968	/* TCP/UDP Checksum has not been calculated */
3969	if (!(status & E1000_RXD_STAT_TCPCS))
3970		return;
3971
3972	/* It must be a TCP or UDP packet with a valid checksum */
3973	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3974		/* TCP checksum is good */
3975		skb->ip_summed = CHECKSUM_UNNECESSARY;
3976	}
3977	adapter->hw_csum_good++;
3978}
3979
3980/**
3981 * e1000_consume_page - helper function for jumbo Rx path
3982 * @bi: software descriptor shadow data
3983 * @skb: skb being modified
3984 * @length: length of data being added
3985 **/
3986static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3987			       u16 length)
3988{
3989	bi->rxbuf.page = NULL;
3990	skb->len += length;
3991	skb->data_len += length;
3992	skb->truesize += PAGE_SIZE;
3993}
3994
3995/**
3996 * e1000_receive_skb - helper function to handle rx indications
3997 * @adapter: board private structure
3998 * @status: descriptor status field as written by hardware
3999 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4000 * @skb: pointer to sk_buff to be indicated to stack
4001 */
4002static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4003			      __le16 vlan, struct sk_buff *skb)
4004{
4005	skb->protocol = eth_type_trans(skb, adapter->netdev);
4006
4007	if (status & E1000_RXD_STAT_VP) {
4008		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4009
4010		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4011	}
4012	napi_gro_receive(&adapter->napi, skb);
4013}
4014
4015/**
4016 * e1000_tbi_adjust_stats
4017 * @hw: Struct containing variables accessed by shared code
4018 * @stats: point to stats struct
4019 * @frame_len: The length of the frame in question
4020 * @mac_addr: The Ethernet destination address of the frame in question
4021 *
4022 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4023 */
4024static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4025				   struct e1000_hw_stats *stats,
4026				   u32 frame_len, const u8 *mac_addr)
4027{
4028	u64 carry_bit;
4029
4030	/* First adjust the frame length. */
4031	frame_len--;
4032	/* We need to adjust the statistics counters, since the hardware
4033	 * counters overcount this packet as a CRC error and undercount
4034	 * the packet as a good packet
4035	 */
4036	/* This packet should not be counted as a CRC error. */
4037	stats->crcerrs--;
4038	/* This packet does count as a Good Packet Received. */
4039	stats->gprc++;
4040
4041	/* Adjust the Good Octets received counters */
4042	carry_bit = 0x80000000 & stats->gorcl;
4043	stats->gorcl += frame_len;
4044	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4045	 * Received Count) was one before the addition,
4046	 * AND it is zero after, then we lost the carry out,
4047	 * need to add one to Gorch (Good Octets Received Count High).
4048	 * This could be simplified if all environments supported
4049	 * 64-bit integers.
4050	 */
4051	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4052		stats->gorch++;
4053	/* Is this a broadcast or multicast?  Check broadcast first,
4054	 * since the test for a multicast frame will test positive on
4055	 * a broadcast frame.
4056	 */
4057	if (is_broadcast_ether_addr(mac_addr))
4058		stats->bprc++;
4059	else if (is_multicast_ether_addr(mac_addr))
4060		stats->mprc++;
4061
4062	if (frame_len == hw->max_frame_size) {
4063		/* In this case, the hardware has overcounted the number of
4064		 * oversize frames.
4065		 */
4066		if (stats->roc > 0)
4067			stats->roc--;
4068	}
4069
4070	/* Adjust the bin counters when the extra byte put the frame in the
4071	 * wrong bin. Remember that the frame_len was adjusted above.
4072	 */
4073	if (frame_len == 64) {
4074		stats->prc64++;
4075		stats->prc127--;
4076	} else if (frame_len == 127) {
4077		stats->prc127++;
4078		stats->prc255--;
4079	} else if (frame_len == 255) {
4080		stats->prc255++;
4081		stats->prc511--;
4082	} else if (frame_len == 511) {
4083		stats->prc511++;
4084		stats->prc1023--;
4085	} else if (frame_len == 1023) {
4086		stats->prc1023++;
4087		stats->prc1522--;
4088	} else if (frame_len == 1522) {
4089		stats->prc1522++;
4090	}
4091}
4092
4093static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4094				    u8 status, u8 errors,
4095				    u32 length, const u8 *data)
4096{
4097	struct e1000_hw *hw = &adapter->hw;
4098	u8 last_byte = *(data + length - 1);
4099
4100	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4101		unsigned long irq_flags;
4102
4103		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4104		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4105		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4106
4107		return true;
4108	}
4109
4110	return false;
4111}
4112
4113static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4114					  unsigned int bufsz)
4115{
4116	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4117
4118	if (unlikely(!skb))
4119		adapter->alloc_rx_buff_failed++;
4120	return skb;
4121}
4122
4123/**
4124 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4125 * @adapter: board private structure
4126 * @rx_ring: ring to clean
4127 * @work_done: amount of napi work completed this call
4128 * @work_to_do: max amount of work allowed for this call to do
4129 *
4130 * the return value indicates whether actual cleaning was done, there
4131 * is no guarantee that everything was cleaned
4132 */
4133static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4134				     struct e1000_rx_ring *rx_ring,
4135				     int *work_done, int work_to_do)
4136{
4137	struct net_device *netdev = adapter->netdev;
4138	struct pci_dev *pdev = adapter->pdev;
4139	struct e1000_rx_desc *rx_desc, *next_rxd;
4140	struct e1000_rx_buffer *buffer_info, *next_buffer;
4141	u32 length;
4142	unsigned int i;
4143	int cleaned_count = 0;
4144	bool cleaned = false;
4145	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4146
4147	i = rx_ring->next_to_clean;
4148	rx_desc = E1000_RX_DESC(*rx_ring, i);
4149	buffer_info = &rx_ring->buffer_info[i];
4150
4151	while (rx_desc->status & E1000_RXD_STAT_DD) {
4152		struct sk_buff *skb;
4153		u8 status;
4154
4155		if (*work_done >= work_to_do)
4156			break;
4157		(*work_done)++;
4158		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4159
4160		status = rx_desc->status;
4161
4162		if (++i == rx_ring->count)
4163			i = 0;
4164
4165		next_rxd = E1000_RX_DESC(*rx_ring, i);
4166		prefetch(next_rxd);
4167
4168		next_buffer = &rx_ring->buffer_info[i];
4169
4170		cleaned = true;
4171		cleaned_count++;
4172		dma_unmap_page(&pdev->dev, buffer_info->dma,
4173			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4174		buffer_info->dma = 0;
4175
4176		length = le16_to_cpu(rx_desc->length);
4177
4178		/* errors is only valid for DD + EOP descriptors */
4179		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4180		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4181			u8 *mapped = page_address(buffer_info->rxbuf.page);
4182
4183			if (e1000_tbi_should_accept(adapter, status,
4184						    rx_desc->errors,
4185						    length, mapped)) {
4186				length--;
4187			} else if (netdev->features & NETIF_F_RXALL) {
4188				goto process_skb;
4189			} else {
4190				/* an error means any chain goes out the window
4191				 * too
4192				 */
4193				dev_kfree_skb(rx_ring->rx_skb_top);
 
4194				rx_ring->rx_skb_top = NULL;
4195				goto next_desc;
4196			}
4197		}
4198
4199#define rxtop rx_ring->rx_skb_top
4200process_skb:
4201		if (!(status & E1000_RXD_STAT_EOP)) {
4202			/* this descriptor is only the beginning (or middle) */
4203			if (!rxtop) {
4204				/* this is the beginning of a chain */
4205				rxtop = napi_get_frags(&adapter->napi);
4206				if (!rxtop)
4207					break;
4208
4209				skb_fill_page_desc(rxtop, 0,
4210						   buffer_info->rxbuf.page,
4211						   0, length);
4212			} else {
4213				/* this is the middle of a chain */
4214				skb_fill_page_desc(rxtop,
4215				    skb_shinfo(rxtop)->nr_frags,
4216				    buffer_info->rxbuf.page, 0, length);
4217			}
4218			e1000_consume_page(buffer_info, rxtop, length);
4219			goto next_desc;
4220		} else {
4221			if (rxtop) {
4222				/* end of the chain */
4223				skb_fill_page_desc(rxtop,
4224				    skb_shinfo(rxtop)->nr_frags,
4225				    buffer_info->rxbuf.page, 0, length);
4226				skb = rxtop;
4227				rxtop = NULL;
4228				e1000_consume_page(buffer_info, skb, length);
4229			} else {
4230				struct page *p;
4231				/* no chain, got EOP, this buf is the packet
4232				 * copybreak to save the put_page/alloc_page
4233				 */
4234				p = buffer_info->rxbuf.page;
4235				if (length <= copybreak) {
 
 
4236					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4237						length -= 4;
4238					skb = e1000_alloc_rx_skb(adapter,
4239								 length);
4240					if (!skb)
4241						break;
4242
4243					memcpy(skb_tail_pointer(skb),
4244					       page_address(p), length);
4245
 
4246					/* re-use the page, so don't erase
4247					 * buffer_info->rxbuf.page
4248					 */
4249					skb_put(skb, length);
4250					e1000_rx_checksum(adapter,
4251							  status | rx_desc->errors << 24,
4252							  le16_to_cpu(rx_desc->csum), skb);
4253
4254					total_rx_bytes += skb->len;
4255					total_rx_packets++;
4256
4257					e1000_receive_skb(adapter, status,
4258							  rx_desc->special, skb);
4259					goto next_desc;
4260				} else {
4261					skb = napi_get_frags(&adapter->napi);
4262					if (!skb) {
4263						adapter->alloc_rx_buff_failed++;
4264						break;
4265					}
4266					skb_fill_page_desc(skb, 0, p, 0,
4267							   length);
4268					e1000_consume_page(buffer_info, skb,
4269							   length);
4270				}
4271			}
4272		}
4273
4274		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4275		e1000_rx_checksum(adapter,
4276				  (u32)(status) |
4277				  ((u32)(rx_desc->errors) << 24),
4278				  le16_to_cpu(rx_desc->csum), skb);
4279
4280		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4281		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4282			pskb_trim(skb, skb->len - 4);
4283		total_rx_packets++;
4284
4285		if (status & E1000_RXD_STAT_VP) {
4286			__le16 vlan = rx_desc->special;
4287			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4288
4289			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4290		}
4291
4292		napi_gro_frags(&adapter->napi);
4293
4294next_desc:
4295		rx_desc->status = 0;
4296
4297		/* return some buffers to hardware, one at a time is too slow */
4298		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4299			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4300			cleaned_count = 0;
4301		}
4302
4303		/* use prefetched values */
4304		rx_desc = next_rxd;
4305		buffer_info = next_buffer;
4306	}
4307	rx_ring->next_to_clean = i;
4308
4309	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4310	if (cleaned_count)
4311		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4312
4313	adapter->total_rx_packets += total_rx_packets;
4314	adapter->total_rx_bytes += total_rx_bytes;
4315	netdev->stats.rx_bytes += total_rx_bytes;
4316	netdev->stats.rx_packets += total_rx_packets;
4317	return cleaned;
4318}
4319
4320/* this should improve performance for small packets with large amounts
4321 * of reassembly being done in the stack
4322 */
4323static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4324				       struct e1000_rx_buffer *buffer_info,
4325				       u32 length, const void *data)
4326{
4327	struct sk_buff *skb;
4328
4329	if (length > copybreak)
4330		return NULL;
4331
4332	skb = e1000_alloc_rx_skb(adapter, length);
4333	if (!skb)
4334		return NULL;
4335
4336	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4337				length, DMA_FROM_DEVICE);
4338
4339	skb_put_data(skb, data, length);
4340
4341	return skb;
4342}
4343
4344/**
4345 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4346 * @adapter: board private structure
4347 * @rx_ring: ring to clean
4348 * @work_done: amount of napi work completed this call
4349 * @work_to_do: max amount of work allowed for this call to do
4350 */
4351static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4352			       struct e1000_rx_ring *rx_ring,
4353			       int *work_done, int work_to_do)
4354{
4355	struct net_device *netdev = adapter->netdev;
4356	struct pci_dev *pdev = adapter->pdev;
4357	struct e1000_rx_desc *rx_desc, *next_rxd;
4358	struct e1000_rx_buffer *buffer_info, *next_buffer;
4359	u32 length;
4360	unsigned int i;
4361	int cleaned_count = 0;
4362	bool cleaned = false;
4363	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4364
4365	i = rx_ring->next_to_clean;
4366	rx_desc = E1000_RX_DESC(*rx_ring, i);
4367	buffer_info = &rx_ring->buffer_info[i];
4368
4369	while (rx_desc->status & E1000_RXD_STAT_DD) {
4370		struct sk_buff *skb;
4371		u8 *data;
4372		u8 status;
4373
4374		if (*work_done >= work_to_do)
4375			break;
4376		(*work_done)++;
4377		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4378
4379		status = rx_desc->status;
4380		length = le16_to_cpu(rx_desc->length);
4381
4382		data = buffer_info->rxbuf.data;
4383		prefetch(data);
4384		skb = e1000_copybreak(adapter, buffer_info, length, data);
4385		if (!skb) {
4386			unsigned int frag_len = e1000_frag_len(adapter);
4387
4388			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4389			if (!skb) {
4390				adapter->alloc_rx_buff_failed++;
4391				break;
4392			}
4393
4394			skb_reserve(skb, E1000_HEADROOM);
4395			dma_unmap_single(&pdev->dev, buffer_info->dma,
4396					 adapter->rx_buffer_len,
4397					 DMA_FROM_DEVICE);
4398			buffer_info->dma = 0;
4399			buffer_info->rxbuf.data = NULL;
4400		}
4401
4402		if (++i == rx_ring->count)
4403			i = 0;
4404
4405		next_rxd = E1000_RX_DESC(*rx_ring, i);
4406		prefetch(next_rxd);
4407
4408		next_buffer = &rx_ring->buffer_info[i];
4409
4410		cleaned = true;
4411		cleaned_count++;
4412
4413		/* !EOP means multiple descriptors were used to store a single
4414		 * packet, if thats the case we need to toss it.  In fact, we
4415		 * to toss every packet with the EOP bit clear and the next
4416		 * frame that _does_ have the EOP bit set, as it is by
4417		 * definition only a frame fragment
4418		 */
4419		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4420			adapter->discarding = true;
4421
4422		if (adapter->discarding) {
4423			/* All receives must fit into a single buffer */
4424			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4425			dev_kfree_skb(skb);
4426			if (status & E1000_RXD_STAT_EOP)
4427				adapter->discarding = false;
4428			goto next_desc;
4429		}
4430
4431		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4432			if (e1000_tbi_should_accept(adapter, status,
4433						    rx_desc->errors,
4434						    length, data)) {
4435				length--;
4436			} else if (netdev->features & NETIF_F_RXALL) {
4437				goto process_skb;
4438			} else {
4439				dev_kfree_skb(skb);
4440				goto next_desc;
4441			}
4442		}
4443
4444process_skb:
4445		total_rx_bytes += (length - 4); /* don't count FCS */
4446		total_rx_packets++;
4447
4448		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4449			/* adjust length to remove Ethernet CRC, this must be
4450			 * done after the TBI_ACCEPT workaround above
4451			 */
4452			length -= 4;
4453
4454		if (buffer_info->rxbuf.data == NULL)
4455			skb_put(skb, length);
4456		else /* copybreak skb */
4457			skb_trim(skb, length);
4458
4459		/* Receive Checksum Offload */
4460		e1000_rx_checksum(adapter,
4461				  (u32)(status) |
4462				  ((u32)(rx_desc->errors) << 24),
4463				  le16_to_cpu(rx_desc->csum), skb);
4464
4465		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4466
4467next_desc:
4468		rx_desc->status = 0;
4469
4470		/* return some buffers to hardware, one at a time is too slow */
4471		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4472			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4473			cleaned_count = 0;
4474		}
4475
4476		/* use prefetched values */
4477		rx_desc = next_rxd;
4478		buffer_info = next_buffer;
4479	}
4480	rx_ring->next_to_clean = i;
4481
4482	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4483	if (cleaned_count)
4484		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4485
4486	adapter->total_rx_packets += total_rx_packets;
4487	adapter->total_rx_bytes += total_rx_bytes;
4488	netdev->stats.rx_bytes += total_rx_bytes;
4489	netdev->stats.rx_packets += total_rx_packets;
4490	return cleaned;
4491}
4492
4493/**
4494 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4495 * @adapter: address of board private structure
4496 * @rx_ring: pointer to receive ring structure
4497 * @cleaned_count: number of buffers to allocate this pass
4498 **/
4499static void
4500e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4501			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4502{
4503	struct pci_dev *pdev = adapter->pdev;
4504	struct e1000_rx_desc *rx_desc;
4505	struct e1000_rx_buffer *buffer_info;
4506	unsigned int i;
4507
4508	i = rx_ring->next_to_use;
4509	buffer_info = &rx_ring->buffer_info[i];
4510
4511	while (cleaned_count--) {
4512		/* allocate a new page if necessary */
4513		if (!buffer_info->rxbuf.page) {
4514			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4515			if (unlikely(!buffer_info->rxbuf.page)) {
4516				adapter->alloc_rx_buff_failed++;
4517				break;
4518			}
4519		}
4520
4521		if (!buffer_info->dma) {
4522			buffer_info->dma = dma_map_page(&pdev->dev,
4523							buffer_info->rxbuf.page, 0,
4524							adapter->rx_buffer_len,
4525							DMA_FROM_DEVICE);
4526			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4527				put_page(buffer_info->rxbuf.page);
4528				buffer_info->rxbuf.page = NULL;
4529				buffer_info->dma = 0;
4530				adapter->alloc_rx_buff_failed++;
4531				break;
4532			}
4533		}
4534
4535		rx_desc = E1000_RX_DESC(*rx_ring, i);
4536		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4537
4538		if (unlikely(++i == rx_ring->count))
4539			i = 0;
4540		buffer_info = &rx_ring->buffer_info[i];
4541	}
4542
4543	if (likely(rx_ring->next_to_use != i)) {
4544		rx_ring->next_to_use = i;
4545		if (unlikely(i-- == 0))
4546			i = (rx_ring->count - 1);
4547
4548		/* Force memory writes to complete before letting h/w
4549		 * know there are new descriptors to fetch.  (Only
4550		 * applicable for weak-ordered memory model archs,
4551		 * such as IA-64).
4552		 */
4553		dma_wmb();
4554		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4555	}
4556}
4557
4558/**
4559 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4560 * @adapter: address of board private structure
4561 * @rx_ring: pointer to ring struct
4562 * @cleaned_count: number of new Rx buffers to try to allocate
4563 **/
4564static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4565				   struct e1000_rx_ring *rx_ring,
4566				   int cleaned_count)
4567{
4568	struct e1000_hw *hw = &adapter->hw;
4569	struct pci_dev *pdev = adapter->pdev;
4570	struct e1000_rx_desc *rx_desc;
4571	struct e1000_rx_buffer *buffer_info;
4572	unsigned int i;
4573	unsigned int bufsz = adapter->rx_buffer_len;
4574
4575	i = rx_ring->next_to_use;
4576	buffer_info = &rx_ring->buffer_info[i];
4577
4578	while (cleaned_count--) {
4579		void *data;
4580
4581		if (buffer_info->rxbuf.data)
4582			goto skip;
4583
4584		data = e1000_alloc_frag(adapter);
4585		if (!data) {
4586			/* Better luck next round */
4587			adapter->alloc_rx_buff_failed++;
4588			break;
4589		}
4590
4591		/* Fix for errata 23, can't cross 64kB boundary */
4592		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4593			void *olddata = data;
4594			e_err(rx_err, "skb align check failed: %u bytes at "
4595			      "%p\n", bufsz, data);
4596			/* Try again, without freeing the previous */
4597			data = e1000_alloc_frag(adapter);
4598			/* Failed allocation, critical failure */
4599			if (!data) {
4600				skb_free_frag(olddata);
4601				adapter->alloc_rx_buff_failed++;
4602				break;
4603			}
4604
4605			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4606				/* give up */
4607				skb_free_frag(data);
4608				skb_free_frag(olddata);
4609				adapter->alloc_rx_buff_failed++;
4610				break;
4611			}
4612
4613			/* Use new allocation */
4614			skb_free_frag(olddata);
4615		}
4616		buffer_info->dma = dma_map_single(&pdev->dev,
4617						  data,
4618						  adapter->rx_buffer_len,
4619						  DMA_FROM_DEVICE);
4620		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4621			skb_free_frag(data);
4622			buffer_info->dma = 0;
4623			adapter->alloc_rx_buff_failed++;
4624			break;
4625		}
4626
4627		/* XXX if it was allocated cleanly it will never map to a
4628		 * boundary crossing
4629		 */
4630
4631		/* Fix for errata 23, can't cross 64kB boundary */
4632		if (!e1000_check_64k_bound(adapter,
4633					(void *)(unsigned long)buffer_info->dma,
4634					adapter->rx_buffer_len)) {
4635			e_err(rx_err, "dma align check failed: %u bytes at "
4636			      "%p\n", adapter->rx_buffer_len,
4637			      (void *)(unsigned long)buffer_info->dma);
4638
4639			dma_unmap_single(&pdev->dev, buffer_info->dma,
4640					 adapter->rx_buffer_len,
4641					 DMA_FROM_DEVICE);
4642
4643			skb_free_frag(data);
4644			buffer_info->rxbuf.data = NULL;
4645			buffer_info->dma = 0;
4646
4647			adapter->alloc_rx_buff_failed++;
4648			break;
4649		}
4650		buffer_info->rxbuf.data = data;
4651 skip:
4652		rx_desc = E1000_RX_DESC(*rx_ring, i);
4653		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4654
4655		if (unlikely(++i == rx_ring->count))
4656			i = 0;
4657		buffer_info = &rx_ring->buffer_info[i];
4658	}
4659
4660	if (likely(rx_ring->next_to_use != i)) {
4661		rx_ring->next_to_use = i;
4662		if (unlikely(i-- == 0))
4663			i = (rx_ring->count - 1);
4664
4665		/* Force memory writes to complete before letting h/w
4666		 * know there are new descriptors to fetch.  (Only
4667		 * applicable for weak-ordered memory model archs,
4668		 * such as IA-64).
4669		 */
4670		dma_wmb();
4671		writel(i, hw->hw_addr + rx_ring->rdt);
4672	}
4673}
4674
4675/**
4676 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4677 * @adapter: address of board private structure
4678 **/
4679static void e1000_smartspeed(struct e1000_adapter *adapter)
4680{
4681	struct e1000_hw *hw = &adapter->hw;
4682	u16 phy_status;
4683	u16 phy_ctrl;
4684
4685	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4686	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4687		return;
4688
4689	if (adapter->smartspeed == 0) {
4690		/* If Master/Slave config fault is asserted twice,
4691		 * we assume back-to-back
4692		 */
4693		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4694		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4695			return;
4696		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4697		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4698			return;
4699		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4700		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4701			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4702			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4703					    phy_ctrl);
4704			adapter->smartspeed++;
4705			if (!e1000_phy_setup_autoneg(hw) &&
4706			   !e1000_read_phy_reg(hw, PHY_CTRL,
4707					       &phy_ctrl)) {
4708				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4709					     MII_CR_RESTART_AUTO_NEG);
4710				e1000_write_phy_reg(hw, PHY_CTRL,
4711						    phy_ctrl);
4712			}
4713		}
4714		return;
4715	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4716		/* If still no link, perhaps using 2/3 pair cable */
4717		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4718		phy_ctrl |= CR_1000T_MS_ENABLE;
4719		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4720		if (!e1000_phy_setup_autoneg(hw) &&
4721		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4722			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4723				     MII_CR_RESTART_AUTO_NEG);
4724			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4725		}
4726	}
4727	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4728	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4729		adapter->smartspeed = 0;
4730}
4731
4732/**
4733 * e1000_ioctl - handle ioctl calls
4734 * @netdev: pointer to our netdev
4735 * @ifr: pointer to interface request structure
4736 * @cmd: ioctl data
4737 **/
4738static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4739{
4740	switch (cmd) {
4741	case SIOCGMIIPHY:
4742	case SIOCGMIIREG:
4743	case SIOCSMIIREG:
4744		return e1000_mii_ioctl(netdev, ifr, cmd);
4745	default:
4746		return -EOPNOTSUPP;
4747	}
4748}
4749
4750/**
4751 * e1000_mii_ioctl -
4752 * @netdev: pointer to our netdev
4753 * @ifr: pointer to interface request structure
4754 * @cmd: ioctl data
4755 **/
4756static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4757			   int cmd)
4758{
4759	struct e1000_adapter *adapter = netdev_priv(netdev);
4760	struct e1000_hw *hw = &adapter->hw;
4761	struct mii_ioctl_data *data = if_mii(ifr);
4762	int retval;
4763	u16 mii_reg;
4764	unsigned long flags;
4765
4766	if (hw->media_type != e1000_media_type_copper)
4767		return -EOPNOTSUPP;
4768
4769	switch (cmd) {
4770	case SIOCGMIIPHY:
4771		data->phy_id = hw->phy_addr;
4772		break;
4773	case SIOCGMIIREG:
4774		spin_lock_irqsave(&adapter->stats_lock, flags);
4775		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4776				   &data->val_out)) {
4777			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4778			return -EIO;
4779		}
4780		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4781		break;
4782	case SIOCSMIIREG:
4783		if (data->reg_num & ~(0x1F))
4784			return -EFAULT;
4785		mii_reg = data->val_in;
4786		spin_lock_irqsave(&adapter->stats_lock, flags);
4787		if (e1000_write_phy_reg(hw, data->reg_num,
4788					mii_reg)) {
4789			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4790			return -EIO;
4791		}
4792		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4793		if (hw->media_type == e1000_media_type_copper) {
4794			switch (data->reg_num) {
4795			case PHY_CTRL:
4796				if (mii_reg & MII_CR_POWER_DOWN)
4797					break;
4798				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4799					hw->autoneg = 1;
4800					hw->autoneg_advertised = 0x2F;
4801				} else {
4802					u32 speed;
4803					if (mii_reg & 0x40)
4804						speed = SPEED_1000;
4805					else if (mii_reg & 0x2000)
4806						speed = SPEED_100;
4807					else
4808						speed = SPEED_10;
4809					retval = e1000_set_spd_dplx(
4810						adapter, speed,
4811						((mii_reg & 0x100)
4812						 ? DUPLEX_FULL :
4813						 DUPLEX_HALF));
4814					if (retval)
4815						return retval;
4816				}
4817				if (netif_running(adapter->netdev))
4818					e1000_reinit_locked(adapter);
4819				else
4820					e1000_reset(adapter);
4821				break;
4822			case M88E1000_PHY_SPEC_CTRL:
4823			case M88E1000_EXT_PHY_SPEC_CTRL:
4824				if (e1000_phy_reset(hw))
4825					return -EIO;
4826				break;
4827			}
4828		} else {
4829			switch (data->reg_num) {
4830			case PHY_CTRL:
4831				if (mii_reg & MII_CR_POWER_DOWN)
4832					break;
4833				if (netif_running(adapter->netdev))
4834					e1000_reinit_locked(adapter);
4835				else
4836					e1000_reset(adapter);
4837				break;
4838			}
4839		}
4840		break;
4841	default:
4842		return -EOPNOTSUPP;
4843	}
4844	return E1000_SUCCESS;
4845}
4846
4847void e1000_pci_set_mwi(struct e1000_hw *hw)
4848{
4849	struct e1000_adapter *adapter = hw->back;
4850	int ret_val = pci_set_mwi(adapter->pdev);
4851
4852	if (ret_val)
4853		e_err(probe, "Error in setting MWI\n");
4854}
4855
4856void e1000_pci_clear_mwi(struct e1000_hw *hw)
4857{
4858	struct e1000_adapter *adapter = hw->back;
4859
4860	pci_clear_mwi(adapter->pdev);
4861}
4862
4863int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4864{
4865	struct e1000_adapter *adapter = hw->back;
4866	return pcix_get_mmrbc(adapter->pdev);
4867}
4868
4869void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4870{
4871	struct e1000_adapter *adapter = hw->back;
4872	pcix_set_mmrbc(adapter->pdev, mmrbc);
4873}
4874
4875void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4876{
4877	outl(value, port);
4878}
4879
4880static bool e1000_vlan_used(struct e1000_adapter *adapter)
4881{
4882	u16 vid;
4883
4884	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4885		return true;
4886	return false;
4887}
4888
4889static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4890			      netdev_features_t features)
4891{
4892	struct e1000_hw *hw = &adapter->hw;
4893	u32 ctrl;
4894
4895	ctrl = er32(CTRL);
4896	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4897		/* enable VLAN tag insert/strip */
4898		ctrl |= E1000_CTRL_VME;
4899	} else {
4900		/* disable VLAN tag insert/strip */
4901		ctrl &= ~E1000_CTRL_VME;
4902	}
4903	ew32(CTRL, ctrl);
4904}
4905static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4906				     bool filter_on)
4907{
4908	struct e1000_hw *hw = &adapter->hw;
4909	u32 rctl;
4910
4911	if (!test_bit(__E1000_DOWN, &adapter->flags))
4912		e1000_irq_disable(adapter);
4913
4914	__e1000_vlan_mode(adapter, adapter->netdev->features);
4915	if (filter_on) {
4916		/* enable VLAN receive filtering */
4917		rctl = er32(RCTL);
4918		rctl &= ~E1000_RCTL_CFIEN;
4919		if (!(adapter->netdev->flags & IFF_PROMISC))
4920			rctl |= E1000_RCTL_VFE;
4921		ew32(RCTL, rctl);
4922		e1000_update_mng_vlan(adapter);
4923	} else {
4924		/* disable VLAN receive filtering */
4925		rctl = er32(RCTL);
4926		rctl &= ~E1000_RCTL_VFE;
4927		ew32(RCTL, rctl);
4928	}
4929
4930	if (!test_bit(__E1000_DOWN, &adapter->flags))
4931		e1000_irq_enable(adapter);
4932}
4933
4934static void e1000_vlan_mode(struct net_device *netdev,
4935			    netdev_features_t features)
4936{
4937	struct e1000_adapter *adapter = netdev_priv(netdev);
4938
4939	if (!test_bit(__E1000_DOWN, &adapter->flags))
4940		e1000_irq_disable(adapter);
4941
4942	__e1000_vlan_mode(adapter, features);
4943
4944	if (!test_bit(__E1000_DOWN, &adapter->flags))
4945		e1000_irq_enable(adapter);
4946}
4947
4948static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4949				 __be16 proto, u16 vid)
4950{
4951	struct e1000_adapter *adapter = netdev_priv(netdev);
4952	struct e1000_hw *hw = &adapter->hw;
4953	u32 vfta, index;
4954
4955	if ((hw->mng_cookie.status &
4956	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4957	    (vid == adapter->mng_vlan_id))
4958		return 0;
4959
4960	if (!e1000_vlan_used(adapter))
4961		e1000_vlan_filter_on_off(adapter, true);
4962
4963	/* add VID to filter table */
4964	index = (vid >> 5) & 0x7F;
4965	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4966	vfta |= (1 << (vid & 0x1F));
4967	e1000_write_vfta(hw, index, vfta);
4968
4969	set_bit(vid, adapter->active_vlans);
4970
4971	return 0;
4972}
4973
4974static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4975				  __be16 proto, u16 vid)
4976{
4977	struct e1000_adapter *adapter = netdev_priv(netdev);
4978	struct e1000_hw *hw = &adapter->hw;
4979	u32 vfta, index;
4980
4981	if (!test_bit(__E1000_DOWN, &adapter->flags))
4982		e1000_irq_disable(adapter);
4983	if (!test_bit(__E1000_DOWN, &adapter->flags))
4984		e1000_irq_enable(adapter);
4985
4986	/* remove VID from filter table */
4987	index = (vid >> 5) & 0x7F;
4988	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4989	vfta &= ~(1 << (vid & 0x1F));
4990	e1000_write_vfta(hw, index, vfta);
4991
4992	clear_bit(vid, adapter->active_vlans);
4993
4994	if (!e1000_vlan_used(adapter))
4995		e1000_vlan_filter_on_off(adapter, false);
4996
4997	return 0;
4998}
4999
5000static void e1000_restore_vlan(struct e1000_adapter *adapter)
5001{
5002	u16 vid;
5003
5004	if (!e1000_vlan_used(adapter))
5005		return;
5006
5007	e1000_vlan_filter_on_off(adapter, true);
5008	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5009		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5010}
5011
5012int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5013{
5014	struct e1000_hw *hw = &adapter->hw;
5015
5016	hw->autoneg = 0;
5017
5018	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5019	 * for the switch() below to work
5020	 */
5021	if ((spd & 1) || (dplx & ~1))
5022		goto err_inval;
5023
5024	/* Fiber NICs only allow 1000 gbps Full duplex */
5025	if ((hw->media_type == e1000_media_type_fiber) &&
5026	    spd != SPEED_1000 &&
5027	    dplx != DUPLEX_FULL)
5028		goto err_inval;
5029
5030	switch (spd + dplx) {
5031	case SPEED_10 + DUPLEX_HALF:
5032		hw->forced_speed_duplex = e1000_10_half;
5033		break;
5034	case SPEED_10 + DUPLEX_FULL:
5035		hw->forced_speed_duplex = e1000_10_full;
5036		break;
5037	case SPEED_100 + DUPLEX_HALF:
5038		hw->forced_speed_duplex = e1000_100_half;
5039		break;
5040	case SPEED_100 + DUPLEX_FULL:
5041		hw->forced_speed_duplex = e1000_100_full;
5042		break;
5043	case SPEED_1000 + DUPLEX_FULL:
5044		hw->autoneg = 1;
5045		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5046		break;
5047	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5048	default:
5049		goto err_inval;
5050	}
5051
5052	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5053	hw->mdix = AUTO_ALL_MODES;
5054
5055	return 0;
5056
5057err_inval:
5058	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5059	return -EINVAL;
5060}
5061
5062static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5063{
5064	struct net_device *netdev = pci_get_drvdata(pdev);
5065	struct e1000_adapter *adapter = netdev_priv(netdev);
5066	struct e1000_hw *hw = &adapter->hw;
5067	u32 ctrl, ctrl_ext, rctl, status;
5068	u32 wufc = adapter->wol;
 
 
 
5069
5070	netif_device_detach(netdev);
5071
5072	if (netif_running(netdev)) {
5073		int count = E1000_CHECK_RESET_COUNT;
5074
5075		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5076			usleep_range(10000, 20000);
5077
5078		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5079		rtnl_lock();
5080		e1000_down(adapter);
5081		rtnl_unlock();
5082	}
5083
 
 
 
 
 
 
5084	status = er32(STATUS);
5085	if (status & E1000_STATUS_LU)
5086		wufc &= ~E1000_WUFC_LNKC;
5087
5088	if (wufc) {
5089		e1000_setup_rctl(adapter);
5090		e1000_set_rx_mode(netdev);
5091
5092		rctl = er32(RCTL);
5093
5094		/* turn on all-multi mode if wake on multicast is enabled */
5095		if (wufc & E1000_WUFC_MC)
5096			rctl |= E1000_RCTL_MPE;
5097
5098		/* enable receives in the hardware */
5099		ew32(RCTL, rctl | E1000_RCTL_EN);
5100
5101		if (hw->mac_type >= e1000_82540) {
5102			ctrl = er32(CTRL);
5103			/* advertise wake from D3Cold */
5104			#define E1000_CTRL_ADVD3WUC 0x00100000
5105			/* phy power management enable */
5106			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5107			ctrl |= E1000_CTRL_ADVD3WUC |
5108				E1000_CTRL_EN_PHY_PWR_MGMT;
5109			ew32(CTRL, ctrl);
5110		}
5111
5112		if (hw->media_type == e1000_media_type_fiber ||
5113		    hw->media_type == e1000_media_type_internal_serdes) {
5114			/* keep the laser running in D3 */
5115			ctrl_ext = er32(CTRL_EXT);
5116			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5117			ew32(CTRL_EXT, ctrl_ext);
5118		}
5119
5120		ew32(WUC, E1000_WUC_PME_EN);
5121		ew32(WUFC, wufc);
5122	} else {
5123		ew32(WUC, 0);
5124		ew32(WUFC, 0);
5125	}
5126
5127	e1000_release_manageability(adapter);
5128
5129	*enable_wake = !!wufc;
5130
5131	/* make sure adapter isn't asleep if manageability is enabled */
5132	if (adapter->en_mng_pt)
5133		*enable_wake = true;
5134
5135	if (netif_running(netdev))
5136		e1000_free_irq(adapter);
5137
5138	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5139		pci_disable_device(pdev);
5140
5141	return 0;
5142}
5143
5144static int e1000_suspend(struct device *dev)
 
5145{
5146	int retval;
5147	struct pci_dev *pdev = to_pci_dev(dev);
5148	bool wake;
5149
5150	retval = __e1000_shutdown(pdev, &wake);
5151	device_set_wakeup_enable(dev, wake);
 
5152
5153	return retval;
 
 
 
 
 
 
 
5154}
5155
5156static int e1000_resume(struct device *dev)
5157{
5158	struct pci_dev *pdev = to_pci_dev(dev);
5159	struct net_device *netdev = pci_get_drvdata(pdev);
5160	struct e1000_adapter *adapter = netdev_priv(netdev);
5161	struct e1000_hw *hw = &adapter->hw;
5162	u32 err;
5163
 
 
 
 
5164	if (adapter->need_ioport)
5165		err = pci_enable_device(pdev);
5166	else
5167		err = pci_enable_device_mem(pdev);
5168	if (err) {
5169		pr_err("Cannot enable PCI device from suspend\n");
5170		return err;
5171	}
5172
5173	/* flush memory to make sure state is correct */
5174	smp_mb__before_atomic();
5175	clear_bit(__E1000_DISABLED, &adapter->flags);
5176	pci_set_master(pdev);
5177
5178	pci_enable_wake(pdev, PCI_D3hot, 0);
5179	pci_enable_wake(pdev, PCI_D3cold, 0);
5180
5181	if (netif_running(netdev)) {
5182		err = e1000_request_irq(adapter);
5183		if (err)
5184			return err;
5185	}
5186
5187	e1000_power_up_phy(adapter);
5188	e1000_reset(adapter);
5189	ew32(WUS, ~0);
5190
5191	e1000_init_manageability(adapter);
5192
5193	if (netif_running(netdev))
5194		e1000_up(adapter);
5195
5196	netif_device_attach(netdev);
5197
5198	return 0;
5199}
 
5200
5201static void e1000_shutdown(struct pci_dev *pdev)
5202{
5203	bool wake;
5204
5205	__e1000_shutdown(pdev, &wake);
5206
5207	if (system_state == SYSTEM_POWER_OFF) {
5208		pci_wake_from_d3(pdev, wake);
5209		pci_set_power_state(pdev, PCI_D3hot);
5210	}
5211}
5212
5213#ifdef CONFIG_NET_POLL_CONTROLLER
5214/* Polling 'interrupt' - used by things like netconsole to send skbs
5215 * without having to re-enable interrupts. It's not called while
5216 * the interrupt routine is executing.
5217 */
5218static void e1000_netpoll(struct net_device *netdev)
5219{
5220	struct e1000_adapter *adapter = netdev_priv(netdev);
5221
5222	if (disable_hardirq(adapter->pdev->irq))
5223		e1000_intr(adapter->pdev->irq, netdev);
5224	enable_irq(adapter->pdev->irq);
5225}
5226#endif
5227
5228/**
5229 * e1000_io_error_detected - called when PCI error is detected
5230 * @pdev: Pointer to PCI device
5231 * @state: The current pci connection state
5232 *
5233 * This function is called after a PCI bus error affecting
5234 * this device has been detected.
5235 */
5236static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5237						pci_channel_state_t state)
5238{
5239	struct net_device *netdev = pci_get_drvdata(pdev);
5240	struct e1000_adapter *adapter = netdev_priv(netdev);
5241
5242	rtnl_lock();
5243	netif_device_detach(netdev);
5244
5245	if (state == pci_channel_io_perm_failure) {
5246		rtnl_unlock();
5247		return PCI_ERS_RESULT_DISCONNECT;
5248	}
5249
5250	if (netif_running(netdev))
5251		e1000_down(adapter);
5252
5253	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5254		pci_disable_device(pdev);
5255	rtnl_unlock();
5256
5257	/* Request a slot reset. */
5258	return PCI_ERS_RESULT_NEED_RESET;
5259}
5260
5261/**
5262 * e1000_io_slot_reset - called after the pci bus has been reset.
5263 * @pdev: Pointer to PCI device
5264 *
5265 * Restart the card from scratch, as if from a cold-boot. Implementation
5266 * resembles the first-half of the e1000_resume routine.
5267 */
5268static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5269{
5270	struct net_device *netdev = pci_get_drvdata(pdev);
5271	struct e1000_adapter *adapter = netdev_priv(netdev);
5272	struct e1000_hw *hw = &adapter->hw;
5273	int err;
5274
5275	if (adapter->need_ioport)
5276		err = pci_enable_device(pdev);
5277	else
5278		err = pci_enable_device_mem(pdev);
5279	if (err) {
5280		pr_err("Cannot re-enable PCI device after reset.\n");
5281		return PCI_ERS_RESULT_DISCONNECT;
5282	}
5283
5284	/* flush memory to make sure state is correct */
5285	smp_mb__before_atomic();
5286	clear_bit(__E1000_DISABLED, &adapter->flags);
5287	pci_set_master(pdev);
5288
5289	pci_enable_wake(pdev, PCI_D3hot, 0);
5290	pci_enable_wake(pdev, PCI_D3cold, 0);
5291
5292	e1000_reset(adapter);
5293	ew32(WUS, ~0);
5294
5295	return PCI_ERS_RESULT_RECOVERED;
5296}
5297
5298/**
5299 * e1000_io_resume - called when traffic can start flowing again.
5300 * @pdev: Pointer to PCI device
5301 *
5302 * This callback is called when the error recovery driver tells us that
5303 * its OK to resume normal operation. Implementation resembles the
5304 * second-half of the e1000_resume routine.
5305 */
5306static void e1000_io_resume(struct pci_dev *pdev)
5307{
5308	struct net_device *netdev = pci_get_drvdata(pdev);
5309	struct e1000_adapter *adapter = netdev_priv(netdev);
5310
5311	e1000_init_manageability(adapter);
5312
5313	if (netif_running(netdev)) {
5314		if (e1000_up(adapter)) {
5315			pr_info("can't bring device back up after reset\n");
5316			return;
5317		}
5318	}
5319
5320	netif_device_attach(netdev);
5321}
5322
5323/* e1000_main.c */
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*******************************************************************************
   3
   4  Intel PRO/1000 Linux driver
   5  Copyright(c) 1999 - 2006 Intel Corporation.
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  You should have received a copy of the GNU General Public License along with
  17  this program; if not, write to the Free Software Foundation, Inc.,
  18  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  19
  20  The full GNU General Public License is included in this distribution in
  21  the file called "COPYING".
  22
  23  Contact Information:
  24  Linux NICS <linux.nics@intel.com>
  25  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  26  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27
  28*******************************************************************************/
  29
  30#include "e1000.h"
  31#include <net/ip6_checksum.h>
  32#include <linux/io.h>
  33#include <linux/prefetch.h>
  34#include <linux/bitops.h>
  35#include <linux/if_vlan.h>
  36
  37char e1000_driver_name[] = "e1000";
  38static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  39#define DRV_VERSION "7.3.21-k8-NAPI"
  40const char e1000_driver_version[] = DRV_VERSION;
  41static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  42
  43/* e1000_pci_tbl - PCI Device ID Table
  44 *
  45 * Last entry must be all 0s
  46 *
  47 * Macro expands to...
  48 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  49 */
  50static const struct pci_device_id e1000_pci_tbl[] = {
  51	INTEL_E1000_ETHERNET_DEVICE(0x1000),
  52	INTEL_E1000_ETHERNET_DEVICE(0x1001),
  53	INTEL_E1000_ETHERNET_DEVICE(0x1004),
  54	INTEL_E1000_ETHERNET_DEVICE(0x1008),
  55	INTEL_E1000_ETHERNET_DEVICE(0x1009),
  56	INTEL_E1000_ETHERNET_DEVICE(0x100C),
  57	INTEL_E1000_ETHERNET_DEVICE(0x100D),
  58	INTEL_E1000_ETHERNET_DEVICE(0x100E),
  59	INTEL_E1000_ETHERNET_DEVICE(0x100F),
  60	INTEL_E1000_ETHERNET_DEVICE(0x1010),
  61	INTEL_E1000_ETHERNET_DEVICE(0x1011),
  62	INTEL_E1000_ETHERNET_DEVICE(0x1012),
  63	INTEL_E1000_ETHERNET_DEVICE(0x1013),
  64	INTEL_E1000_ETHERNET_DEVICE(0x1014),
  65	INTEL_E1000_ETHERNET_DEVICE(0x1015),
  66	INTEL_E1000_ETHERNET_DEVICE(0x1016),
  67	INTEL_E1000_ETHERNET_DEVICE(0x1017),
  68	INTEL_E1000_ETHERNET_DEVICE(0x1018),
  69	INTEL_E1000_ETHERNET_DEVICE(0x1019),
  70	INTEL_E1000_ETHERNET_DEVICE(0x101A),
  71	INTEL_E1000_ETHERNET_DEVICE(0x101D),
  72	INTEL_E1000_ETHERNET_DEVICE(0x101E),
  73	INTEL_E1000_ETHERNET_DEVICE(0x1026),
  74	INTEL_E1000_ETHERNET_DEVICE(0x1027),
  75	INTEL_E1000_ETHERNET_DEVICE(0x1028),
  76	INTEL_E1000_ETHERNET_DEVICE(0x1075),
  77	INTEL_E1000_ETHERNET_DEVICE(0x1076),
  78	INTEL_E1000_ETHERNET_DEVICE(0x1077),
  79	INTEL_E1000_ETHERNET_DEVICE(0x1078),
  80	INTEL_E1000_ETHERNET_DEVICE(0x1079),
  81	INTEL_E1000_ETHERNET_DEVICE(0x107A),
  82	INTEL_E1000_ETHERNET_DEVICE(0x107B),
  83	INTEL_E1000_ETHERNET_DEVICE(0x107C),
  84	INTEL_E1000_ETHERNET_DEVICE(0x108A),
  85	INTEL_E1000_ETHERNET_DEVICE(0x1099),
  86	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  87	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  88	/* required last entry */
  89	{0,}
  90};
  91
  92MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  93
  94int e1000_up(struct e1000_adapter *adapter);
  95void e1000_down(struct e1000_adapter *adapter);
  96void e1000_reinit_locked(struct e1000_adapter *adapter);
  97void e1000_reset(struct e1000_adapter *adapter);
  98int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  99int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 100void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 101void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 102static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 103				    struct e1000_tx_ring *txdr);
 104static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 105				    struct e1000_rx_ring *rxdr);
 106static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 107				    struct e1000_tx_ring *tx_ring);
 108static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 109				    struct e1000_rx_ring *rx_ring);
 110void e1000_update_stats(struct e1000_adapter *adapter);
 111
 112static int e1000_init_module(void);
 113static void e1000_exit_module(void);
 114static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 115static void e1000_remove(struct pci_dev *pdev);
 116static int e1000_alloc_queues(struct e1000_adapter *adapter);
 117static int e1000_sw_init(struct e1000_adapter *adapter);
 118int e1000_open(struct net_device *netdev);
 119int e1000_close(struct net_device *netdev);
 120static void e1000_configure_tx(struct e1000_adapter *adapter);
 121static void e1000_configure_rx(struct e1000_adapter *adapter);
 122static void e1000_setup_rctl(struct e1000_adapter *adapter);
 123static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 125static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 126				struct e1000_tx_ring *tx_ring);
 127static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 128				struct e1000_rx_ring *rx_ring);
 129static void e1000_set_rx_mode(struct net_device *netdev);
 130static void e1000_update_phy_info_task(struct work_struct *work);
 131static void e1000_watchdog(struct work_struct *work);
 132static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 133static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 134				    struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139			       struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142			       struct e1000_rx_ring *rx_ring,
 143			       int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145				     struct e1000_rx_ring *rx_ring,
 146				     int *work_done, int work_to_do);
 147static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 148					 struct e1000_rx_ring *rx_ring,
 149					 int cleaned_count)
 150{
 151}
 152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 153				   struct e1000_rx_ring *rx_ring,
 154				   int cleaned_count);
 155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 156					 struct e1000_rx_ring *rx_ring,
 157					 int cleaned_count);
 158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 160			   int cmd);
 161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 163static void e1000_tx_timeout(struct net_device *dev);
 164static void e1000_reset_task(struct work_struct *work);
 165static void e1000_smartspeed(struct e1000_adapter *adapter);
 166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 167				       struct sk_buff *skb);
 168
 169static bool e1000_vlan_used(struct e1000_adapter *adapter);
 170static void e1000_vlan_mode(struct net_device *netdev,
 171			    netdev_features_t features);
 172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 173				     bool filter_on);
 174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 175				 __be16 proto, u16 vid);
 176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 177				  __be16 proto, u16 vid);
 178static void e1000_restore_vlan(struct e1000_adapter *adapter);
 179
 180#ifdef CONFIG_PM
 181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 182static int e1000_resume(struct pci_dev *pdev);
 183#endif
 184static void e1000_shutdown(struct pci_dev *pdev);
 185
 186#ifdef CONFIG_NET_POLL_CONTROLLER
 187/* for netdump / net console */
 188static void e1000_netpoll (struct net_device *netdev);
 189#endif
 190
 191#define COPYBREAK_DEFAULT 256
 192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 193module_param(copybreak, uint, 0644);
 194MODULE_PARM_DESC(copybreak,
 195	"Maximum size of packet that is copied to a new buffer on receive");
 196
 197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 198						pci_channel_state_t state);
 199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 200static void e1000_io_resume(struct pci_dev *pdev);
 201
 202static const struct pci_error_handlers e1000_err_handler = {
 203	.error_detected = e1000_io_error_detected,
 204	.slot_reset = e1000_io_slot_reset,
 205	.resume = e1000_io_resume,
 206};
 207
 
 
 208static struct pci_driver e1000_driver = {
 209	.name     = e1000_driver_name,
 210	.id_table = e1000_pci_tbl,
 211	.probe    = e1000_probe,
 212	.remove   = e1000_remove,
 213#ifdef CONFIG_PM
 214	/* Power Management Hooks */
 215	.suspend  = e1000_suspend,
 216	.resume   = e1000_resume,
 217#endif
 218	.shutdown = e1000_shutdown,
 219	.err_handler = &e1000_err_handler
 220};
 221
 222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 224MODULE_LICENSE("GPL");
 225MODULE_VERSION(DRV_VERSION);
 226
 227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 228static int debug = -1;
 229module_param(debug, int, 0);
 230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 231
 232/**
 233 * e1000_get_hw_dev - return device
 234 * used by hardware layer to print debugging information
 
 
 235 *
 236 **/
 237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 238{
 239	struct e1000_adapter *adapter = hw->back;
 240	return adapter->netdev;
 241}
 242
 243/**
 244 * e1000_init_module - Driver Registration Routine
 245 *
 246 * e1000_init_module is the first routine called when the driver is
 247 * loaded. All it does is register with the PCI subsystem.
 248 **/
 249static int __init e1000_init_module(void)
 250{
 251	int ret;
 252	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 253
 254	pr_info("%s\n", e1000_copyright);
 255
 256	ret = pci_register_driver(&e1000_driver);
 257	if (copybreak != COPYBREAK_DEFAULT) {
 258		if (copybreak == 0)
 259			pr_info("copybreak disabled\n");
 260		else
 261			pr_info("copybreak enabled for "
 262				   "packets <= %u bytes\n", copybreak);
 263	}
 264	return ret;
 265}
 266
 267module_init(e1000_init_module);
 268
 269/**
 270 * e1000_exit_module - Driver Exit Cleanup Routine
 271 *
 272 * e1000_exit_module is called just before the driver is removed
 273 * from memory.
 274 **/
 275static void __exit e1000_exit_module(void)
 276{
 277	pci_unregister_driver(&e1000_driver);
 278}
 279
 280module_exit(e1000_exit_module);
 281
 282static int e1000_request_irq(struct e1000_adapter *adapter)
 283{
 284	struct net_device *netdev = adapter->netdev;
 285	irq_handler_t handler = e1000_intr;
 286	int irq_flags = IRQF_SHARED;
 287	int err;
 288
 289	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 290			  netdev);
 291	if (err) {
 292		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 293	}
 294
 295	return err;
 296}
 297
 298static void e1000_free_irq(struct e1000_adapter *adapter)
 299{
 300	struct net_device *netdev = adapter->netdev;
 301
 302	free_irq(adapter->pdev->irq, netdev);
 303}
 304
 305/**
 306 * e1000_irq_disable - Mask off interrupt generation on the NIC
 307 * @adapter: board private structure
 308 **/
 309static void e1000_irq_disable(struct e1000_adapter *adapter)
 310{
 311	struct e1000_hw *hw = &adapter->hw;
 312
 313	ew32(IMC, ~0);
 314	E1000_WRITE_FLUSH();
 315	synchronize_irq(adapter->pdev->irq);
 316}
 317
 318/**
 319 * e1000_irq_enable - Enable default interrupt generation settings
 320 * @adapter: board private structure
 321 **/
 322static void e1000_irq_enable(struct e1000_adapter *adapter)
 323{
 324	struct e1000_hw *hw = &adapter->hw;
 325
 326	ew32(IMS, IMS_ENABLE_MASK);
 327	E1000_WRITE_FLUSH();
 328}
 329
 330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 331{
 332	struct e1000_hw *hw = &adapter->hw;
 333	struct net_device *netdev = adapter->netdev;
 334	u16 vid = hw->mng_cookie.vlan_id;
 335	u16 old_vid = adapter->mng_vlan_id;
 336
 337	if (!e1000_vlan_used(adapter))
 338		return;
 339
 340	if (!test_bit(vid, adapter->active_vlans)) {
 341		if (hw->mng_cookie.status &
 342		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 343			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 344			adapter->mng_vlan_id = vid;
 345		} else {
 346			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 347		}
 348		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 349		    (vid != old_vid) &&
 350		    !test_bit(old_vid, adapter->active_vlans))
 351			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 352					       old_vid);
 353	} else {
 354		adapter->mng_vlan_id = vid;
 355	}
 356}
 357
 358static void e1000_init_manageability(struct e1000_adapter *adapter)
 359{
 360	struct e1000_hw *hw = &adapter->hw;
 361
 362	if (adapter->en_mng_pt) {
 363		u32 manc = er32(MANC);
 364
 365		/* disable hardware interception of ARP */
 366		manc &= ~(E1000_MANC_ARP_EN);
 367
 368		ew32(MANC, manc);
 369	}
 370}
 371
 372static void e1000_release_manageability(struct e1000_adapter *adapter)
 373{
 374	struct e1000_hw *hw = &adapter->hw;
 375
 376	if (adapter->en_mng_pt) {
 377		u32 manc = er32(MANC);
 378
 379		/* re-enable hardware interception of ARP */
 380		manc |= E1000_MANC_ARP_EN;
 381
 382		ew32(MANC, manc);
 383	}
 384}
 385
 386/**
 387 * e1000_configure - configure the hardware for RX and TX
 388 * @adapter = private board structure
 389 **/
 390static void e1000_configure(struct e1000_adapter *adapter)
 391{
 392	struct net_device *netdev = adapter->netdev;
 393	int i;
 394
 395	e1000_set_rx_mode(netdev);
 396
 397	e1000_restore_vlan(adapter);
 398	e1000_init_manageability(adapter);
 399
 400	e1000_configure_tx(adapter);
 401	e1000_setup_rctl(adapter);
 402	e1000_configure_rx(adapter);
 403	/* call E1000_DESC_UNUSED which always leaves
 404	 * at least 1 descriptor unused to make sure
 405	 * next_to_use != next_to_clean
 406	 */
 407	for (i = 0; i < adapter->num_rx_queues; i++) {
 408		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 409		adapter->alloc_rx_buf(adapter, ring,
 410				      E1000_DESC_UNUSED(ring));
 411	}
 412}
 413
 414int e1000_up(struct e1000_adapter *adapter)
 415{
 416	struct e1000_hw *hw = &adapter->hw;
 417
 418	/* hardware has been reset, we need to reload some things */
 419	e1000_configure(adapter);
 420
 421	clear_bit(__E1000_DOWN, &adapter->flags);
 422
 423	napi_enable(&adapter->napi);
 424
 425	e1000_irq_enable(adapter);
 426
 427	netif_wake_queue(adapter->netdev);
 428
 429	/* fire a link change interrupt to start the watchdog */
 430	ew32(ICS, E1000_ICS_LSC);
 431	return 0;
 432}
 433
 434/**
 435 * e1000_power_up_phy - restore link in case the phy was powered down
 436 * @adapter: address of board private structure
 437 *
 438 * The phy may be powered down to save power and turn off link when the
 439 * driver is unloaded and wake on lan is not enabled (among others)
 440 * *** this routine MUST be followed by a call to e1000_reset ***
 441 **/
 442void e1000_power_up_phy(struct e1000_adapter *adapter)
 443{
 444	struct e1000_hw *hw = &adapter->hw;
 445	u16 mii_reg = 0;
 446
 447	/* Just clear the power down bit to wake the phy back up */
 448	if (hw->media_type == e1000_media_type_copper) {
 449		/* according to the manual, the phy will retain its
 450		 * settings across a power-down/up cycle
 451		 */
 452		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 453		mii_reg &= ~MII_CR_POWER_DOWN;
 454		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 455	}
 456}
 457
 458static void e1000_power_down_phy(struct e1000_adapter *adapter)
 459{
 460	struct e1000_hw *hw = &adapter->hw;
 461
 462	/* Power down the PHY so no link is implied when interface is down *
 463	 * The PHY cannot be powered down if any of the following is true *
 464	 * (a) WoL is enabled
 465	 * (b) AMT is active
 466	 * (c) SoL/IDER session is active
 467	 */
 468	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 469	   hw->media_type == e1000_media_type_copper) {
 470		u16 mii_reg = 0;
 471
 472		switch (hw->mac_type) {
 473		case e1000_82540:
 474		case e1000_82545:
 475		case e1000_82545_rev_3:
 476		case e1000_82546:
 477		case e1000_ce4100:
 478		case e1000_82546_rev_3:
 479		case e1000_82541:
 480		case e1000_82541_rev_2:
 481		case e1000_82547:
 482		case e1000_82547_rev_2:
 483			if (er32(MANC) & E1000_MANC_SMBUS_EN)
 484				goto out;
 485			break;
 486		default:
 487			goto out;
 488		}
 489		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 490		mii_reg |= MII_CR_POWER_DOWN;
 491		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 492		msleep(1);
 493	}
 494out:
 495	return;
 496}
 497
 498static void e1000_down_and_stop(struct e1000_adapter *adapter)
 499{
 500	set_bit(__E1000_DOWN, &adapter->flags);
 501
 502	cancel_delayed_work_sync(&adapter->watchdog_task);
 503
 504	/*
 505	 * Since the watchdog task can reschedule other tasks, we should cancel
 506	 * it first, otherwise we can run into the situation when a work is
 507	 * still running after the adapter has been turned down.
 508	 */
 509
 510	cancel_delayed_work_sync(&adapter->phy_info_task);
 511	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 512
 513	/* Only kill reset task if adapter is not resetting */
 514	if (!test_bit(__E1000_RESETTING, &adapter->flags))
 515		cancel_work_sync(&adapter->reset_task);
 516}
 517
 518void e1000_down(struct e1000_adapter *adapter)
 519{
 520	struct e1000_hw *hw = &adapter->hw;
 521	struct net_device *netdev = adapter->netdev;
 522	u32 rctl, tctl;
 523
 524	/* disable receives in the hardware */
 525	rctl = er32(RCTL);
 526	ew32(RCTL, rctl & ~E1000_RCTL_EN);
 527	/* flush and sleep below */
 528
 529	netif_tx_disable(netdev);
 530
 531	/* disable transmits in the hardware */
 532	tctl = er32(TCTL);
 533	tctl &= ~E1000_TCTL_EN;
 534	ew32(TCTL, tctl);
 535	/* flush both disables and wait for them to finish */
 536	E1000_WRITE_FLUSH();
 537	msleep(10);
 538
 539	/* Set the carrier off after transmits have been disabled in the
 540	 * hardware, to avoid race conditions with e1000_watchdog() (which
 541	 * may be running concurrently to us, checking for the carrier
 542	 * bit to decide whether it should enable transmits again). Such
 543	 * a race condition would result into transmission being disabled
 544	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 545	 */
 546	netif_carrier_off(netdev);
 547
 
 
 548	napi_disable(&adapter->napi);
 549
 550	e1000_irq_disable(adapter);
 551
 552	/* Setting DOWN must be after irq_disable to prevent
 553	 * a screaming interrupt.  Setting DOWN also prevents
 554	 * tasks from rescheduling.
 555	 */
 556	e1000_down_and_stop(adapter);
 557
 558	adapter->link_speed = 0;
 559	adapter->link_duplex = 0;
 560
 561	e1000_reset(adapter);
 562	e1000_clean_all_tx_rings(adapter);
 563	e1000_clean_all_rx_rings(adapter);
 564}
 565
 566void e1000_reinit_locked(struct e1000_adapter *adapter)
 567{
 568	WARN_ON(in_interrupt());
 569	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 570		msleep(1);
 571	e1000_down(adapter);
 572	e1000_up(adapter);
 
 
 
 
 
 573	clear_bit(__E1000_RESETTING, &adapter->flags);
 574}
 575
 576void e1000_reset(struct e1000_adapter *adapter)
 577{
 578	struct e1000_hw *hw = &adapter->hw;
 579	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 580	bool legacy_pba_adjust = false;
 581	u16 hwm;
 582
 583	/* Repartition Pba for greater than 9k mtu
 584	 * To take effect CTRL.RST is required.
 585	 */
 586
 587	switch (hw->mac_type) {
 588	case e1000_82542_rev2_0:
 589	case e1000_82542_rev2_1:
 590	case e1000_82543:
 591	case e1000_82544:
 592	case e1000_82540:
 593	case e1000_82541:
 594	case e1000_82541_rev_2:
 595		legacy_pba_adjust = true;
 596		pba = E1000_PBA_48K;
 597		break;
 598	case e1000_82545:
 599	case e1000_82545_rev_3:
 600	case e1000_82546:
 601	case e1000_ce4100:
 602	case e1000_82546_rev_3:
 603		pba = E1000_PBA_48K;
 604		break;
 605	case e1000_82547:
 606	case e1000_82547_rev_2:
 607		legacy_pba_adjust = true;
 608		pba = E1000_PBA_30K;
 609		break;
 610	case e1000_undefined:
 611	case e1000_num_macs:
 612		break;
 613	}
 614
 615	if (legacy_pba_adjust) {
 616		if (hw->max_frame_size > E1000_RXBUFFER_8192)
 617			pba -= 8; /* allocate more FIFO for Tx */
 618
 619		if (hw->mac_type == e1000_82547) {
 620			adapter->tx_fifo_head = 0;
 621			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 622			adapter->tx_fifo_size =
 623				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 624			atomic_set(&adapter->tx_fifo_stall, 0);
 625		}
 626	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
 627		/* adjust PBA for jumbo frames */
 628		ew32(PBA, pba);
 629
 630		/* To maintain wire speed transmits, the Tx FIFO should be
 631		 * large enough to accommodate two full transmit packets,
 632		 * rounded up to the next 1KB and expressed in KB.  Likewise,
 633		 * the Rx FIFO should be large enough to accommodate at least
 634		 * one full receive packet and is similarly rounded up and
 635		 * expressed in KB.
 636		 */
 637		pba = er32(PBA);
 638		/* upper 16 bits has Tx packet buffer allocation size in KB */
 639		tx_space = pba >> 16;
 640		/* lower 16 bits has Rx packet buffer allocation size in KB */
 641		pba &= 0xffff;
 642		/* the Tx fifo also stores 16 bytes of information about the Tx
 643		 * but don't include ethernet FCS because hardware appends it
 644		 */
 645		min_tx_space = (hw->max_frame_size +
 646				sizeof(struct e1000_tx_desc) -
 647				ETH_FCS_LEN) * 2;
 648		min_tx_space = ALIGN(min_tx_space, 1024);
 649		min_tx_space >>= 10;
 650		/* software strips receive CRC, so leave room for it */
 651		min_rx_space = hw->max_frame_size;
 652		min_rx_space = ALIGN(min_rx_space, 1024);
 653		min_rx_space >>= 10;
 654
 655		/* If current Tx allocation is less than the min Tx FIFO size,
 656		 * and the min Tx FIFO size is less than the current Rx FIFO
 657		 * allocation, take space away from current Rx allocation
 658		 */
 659		if (tx_space < min_tx_space &&
 660		    ((min_tx_space - tx_space) < pba)) {
 661			pba = pba - (min_tx_space - tx_space);
 662
 663			/* PCI/PCIx hardware has PBA alignment constraints */
 664			switch (hw->mac_type) {
 665			case e1000_82545 ... e1000_82546_rev_3:
 666				pba &= ~(E1000_PBA_8K - 1);
 667				break;
 668			default:
 669				break;
 670			}
 671
 672			/* if short on Rx space, Rx wins and must trump Tx
 673			 * adjustment or use Early Receive if available
 674			 */
 675			if (pba < min_rx_space)
 676				pba = min_rx_space;
 677		}
 678	}
 679
 680	ew32(PBA, pba);
 681
 682	/* flow control settings:
 683	 * The high water mark must be low enough to fit one full frame
 684	 * (or the size used for early receive) above it in the Rx FIFO.
 685	 * Set it to the lower of:
 686	 * - 90% of the Rx FIFO size, and
 687	 * - the full Rx FIFO size minus the early receive size (for parts
 688	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 689	 * - the full Rx FIFO size minus one full frame
 690	 */
 691	hwm = min(((pba << 10) * 9 / 10),
 692		  ((pba << 10) - hw->max_frame_size));
 693
 694	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
 695	hw->fc_low_water = hw->fc_high_water - 8;
 696	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 697	hw->fc_send_xon = 1;
 698	hw->fc = hw->original_fc;
 699
 700	/* Allow time for pending master requests to run */
 701	e1000_reset_hw(hw);
 702	if (hw->mac_type >= e1000_82544)
 703		ew32(WUC, 0);
 704
 705	if (e1000_init_hw(hw))
 706		e_dev_err("Hardware Error\n");
 707	e1000_update_mng_vlan(adapter);
 708
 709	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 710	if (hw->mac_type >= e1000_82544 &&
 711	    hw->autoneg == 1 &&
 712	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 713		u32 ctrl = er32(CTRL);
 714		/* clear phy power management bit if we are in gig only mode,
 715		 * which if enabled will attempt negotiation to 100Mb, which
 716		 * can cause a loss of link at power off or driver unload
 717		 */
 718		ctrl &= ~E1000_CTRL_SWDPIN3;
 719		ew32(CTRL, ctrl);
 720	}
 721
 722	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 723	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 724
 725	e1000_reset_adaptive(hw);
 726	e1000_phy_get_info(hw, &adapter->phy_info);
 727
 728	e1000_release_manageability(adapter);
 729}
 730
 731/* Dump the eeprom for users having checksum issues */
 732static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 733{
 734	struct net_device *netdev = adapter->netdev;
 735	struct ethtool_eeprom eeprom;
 736	const struct ethtool_ops *ops = netdev->ethtool_ops;
 737	u8 *data;
 738	int i;
 739	u16 csum_old, csum_new = 0;
 740
 741	eeprom.len = ops->get_eeprom_len(netdev);
 742	eeprom.offset = 0;
 743
 744	data = kmalloc(eeprom.len, GFP_KERNEL);
 745	if (!data)
 746		return;
 747
 748	ops->get_eeprom(netdev, &eeprom, data);
 749
 750	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 751		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 752	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 753		csum_new += data[i] + (data[i + 1] << 8);
 754	csum_new = EEPROM_SUM - csum_new;
 755
 756	pr_err("/*********************/\n");
 757	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 758	pr_err("Calculated              : 0x%04x\n", csum_new);
 759
 760	pr_err("Offset    Values\n");
 761	pr_err("========  ======\n");
 762	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 763
 764	pr_err("Include this output when contacting your support provider.\n");
 765	pr_err("This is not a software error! Something bad happened to\n");
 766	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 767	pr_err("result in further problems, possibly loss of data,\n");
 768	pr_err("corruption or system hangs!\n");
 769	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 770	pr_err("which is invalid and requires you to set the proper MAC\n");
 771	pr_err("address manually before continuing to enable this network\n");
 772	pr_err("device. Please inspect the EEPROM dump and report the\n");
 773	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 774	pr_err("/*********************/\n");
 775
 776	kfree(data);
 777}
 778
 779/**
 780 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 781 * @pdev: PCI device information struct
 782 *
 783 * Return true if an adapter needs ioport resources
 784 **/
 785static int e1000_is_need_ioport(struct pci_dev *pdev)
 786{
 787	switch (pdev->device) {
 788	case E1000_DEV_ID_82540EM:
 789	case E1000_DEV_ID_82540EM_LOM:
 790	case E1000_DEV_ID_82540EP:
 791	case E1000_DEV_ID_82540EP_LOM:
 792	case E1000_DEV_ID_82540EP_LP:
 793	case E1000_DEV_ID_82541EI:
 794	case E1000_DEV_ID_82541EI_MOBILE:
 795	case E1000_DEV_ID_82541ER:
 796	case E1000_DEV_ID_82541ER_LOM:
 797	case E1000_DEV_ID_82541GI:
 798	case E1000_DEV_ID_82541GI_LF:
 799	case E1000_DEV_ID_82541GI_MOBILE:
 800	case E1000_DEV_ID_82544EI_COPPER:
 801	case E1000_DEV_ID_82544EI_FIBER:
 802	case E1000_DEV_ID_82544GC_COPPER:
 803	case E1000_DEV_ID_82544GC_LOM:
 804	case E1000_DEV_ID_82545EM_COPPER:
 805	case E1000_DEV_ID_82545EM_FIBER:
 806	case E1000_DEV_ID_82546EB_COPPER:
 807	case E1000_DEV_ID_82546EB_FIBER:
 808	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 809		return true;
 810	default:
 811		return false;
 812	}
 813}
 814
 815static netdev_features_t e1000_fix_features(struct net_device *netdev,
 816	netdev_features_t features)
 817{
 818	/* Since there is no support for separate Rx/Tx vlan accel
 819	 * enable/disable make sure Tx flag is always in same state as Rx.
 820	 */
 821	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 822		features |= NETIF_F_HW_VLAN_CTAG_TX;
 823	else
 824		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 825
 826	return features;
 827}
 828
 829static int e1000_set_features(struct net_device *netdev,
 830	netdev_features_t features)
 831{
 832	struct e1000_adapter *adapter = netdev_priv(netdev);
 833	netdev_features_t changed = features ^ netdev->features;
 834
 835	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 836		e1000_vlan_mode(netdev, features);
 837
 838	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 839		return 0;
 840
 841	netdev->features = features;
 842	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 843
 844	if (netif_running(netdev))
 845		e1000_reinit_locked(adapter);
 846	else
 847		e1000_reset(adapter);
 848
 849	return 0;
 850}
 851
 852static const struct net_device_ops e1000_netdev_ops = {
 853	.ndo_open		= e1000_open,
 854	.ndo_stop		= e1000_close,
 855	.ndo_start_xmit		= e1000_xmit_frame,
 856	.ndo_set_rx_mode	= e1000_set_rx_mode,
 857	.ndo_set_mac_address	= e1000_set_mac,
 858	.ndo_tx_timeout		= e1000_tx_timeout,
 859	.ndo_change_mtu		= e1000_change_mtu,
 860	.ndo_do_ioctl		= e1000_ioctl,
 861	.ndo_validate_addr	= eth_validate_addr,
 862	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
 863	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
 864#ifdef CONFIG_NET_POLL_CONTROLLER
 865	.ndo_poll_controller	= e1000_netpoll,
 866#endif
 867	.ndo_fix_features	= e1000_fix_features,
 868	.ndo_set_features	= e1000_set_features,
 869};
 870
 871/**
 872 * e1000_init_hw_struct - initialize members of hw struct
 873 * @adapter: board private struct
 874 * @hw: structure used by e1000_hw.c
 875 *
 876 * Factors out initialization of the e1000_hw struct to its own function
 877 * that can be called very early at init (just after struct allocation).
 878 * Fields are initialized based on PCI device information and
 879 * OS network device settings (MTU size).
 880 * Returns negative error codes if MAC type setup fails.
 881 */
 882static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 883				struct e1000_hw *hw)
 884{
 885	struct pci_dev *pdev = adapter->pdev;
 886
 887	/* PCI config space info */
 888	hw->vendor_id = pdev->vendor;
 889	hw->device_id = pdev->device;
 890	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 891	hw->subsystem_id = pdev->subsystem_device;
 892	hw->revision_id = pdev->revision;
 893
 894	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 895
 896	hw->max_frame_size = adapter->netdev->mtu +
 897			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 898	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 899
 900	/* identify the MAC */
 901	if (e1000_set_mac_type(hw)) {
 902		e_err(probe, "Unknown MAC Type\n");
 903		return -EIO;
 904	}
 905
 906	switch (hw->mac_type) {
 907	default:
 908		break;
 909	case e1000_82541:
 910	case e1000_82547:
 911	case e1000_82541_rev_2:
 912	case e1000_82547_rev_2:
 913		hw->phy_init_script = 1;
 914		break;
 915	}
 916
 917	e1000_set_media_type(hw);
 918	e1000_get_bus_info(hw);
 919
 920	hw->wait_autoneg_complete = false;
 921	hw->tbi_compatibility_en = true;
 922	hw->adaptive_ifs = true;
 923
 924	/* Copper options */
 925
 926	if (hw->media_type == e1000_media_type_copper) {
 927		hw->mdix = AUTO_ALL_MODES;
 928		hw->disable_polarity_correction = false;
 929		hw->master_slave = E1000_MASTER_SLAVE;
 930	}
 931
 932	return 0;
 933}
 934
 935/**
 936 * e1000_probe - Device Initialization Routine
 937 * @pdev: PCI device information struct
 938 * @ent: entry in e1000_pci_tbl
 939 *
 940 * Returns 0 on success, negative on failure
 941 *
 942 * e1000_probe initializes an adapter identified by a pci_dev structure.
 943 * The OS initialization, configuring of the adapter private structure,
 944 * and a hardware reset occur.
 945 **/
 946static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 947{
 948	struct net_device *netdev;
 949	struct e1000_adapter *adapter = NULL;
 950	struct e1000_hw *hw;
 951
 952	static int cards_found;
 953	static int global_quad_port_a; /* global ksp3 port a indication */
 954	int i, err, pci_using_dac;
 955	u16 eeprom_data = 0;
 956	u16 tmp = 0;
 957	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 958	int bars, need_ioport;
 959	bool disable_dev = false;
 960
 961	/* do not allocate ioport bars when not needed */
 962	need_ioport = e1000_is_need_ioport(pdev);
 963	if (need_ioport) {
 964		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 965		err = pci_enable_device(pdev);
 966	} else {
 967		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 968		err = pci_enable_device_mem(pdev);
 969	}
 970	if (err)
 971		return err;
 972
 973	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 974	if (err)
 975		goto err_pci_reg;
 976
 977	pci_set_master(pdev);
 978	err = pci_save_state(pdev);
 979	if (err)
 980		goto err_alloc_etherdev;
 981
 982	err = -ENOMEM;
 983	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 984	if (!netdev)
 985		goto err_alloc_etherdev;
 986
 987	SET_NETDEV_DEV(netdev, &pdev->dev);
 988
 989	pci_set_drvdata(pdev, netdev);
 990	adapter = netdev_priv(netdev);
 991	adapter->netdev = netdev;
 992	adapter->pdev = pdev;
 993	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 994	adapter->bars = bars;
 995	adapter->need_ioport = need_ioport;
 996
 997	hw = &adapter->hw;
 998	hw->back = adapter;
 999
1000	err = -EIO;
1001	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1002	if (!hw->hw_addr)
1003		goto err_ioremap;
1004
1005	if (adapter->need_ioport) {
1006		for (i = BAR_1; i <= BAR_5; i++) {
1007			if (pci_resource_len(pdev, i) == 0)
1008				continue;
1009			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1010				hw->io_base = pci_resource_start(pdev, i);
1011				break;
1012			}
1013		}
1014	}
1015
1016	/* make ready for any if (hw->...) below */
1017	err = e1000_init_hw_struct(adapter, hw);
1018	if (err)
1019		goto err_sw_init;
1020
1021	/* there is a workaround being applied below that limits
1022	 * 64-bit DMA addresses to 64-bit hardware.  There are some
1023	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1024	 */
1025	pci_using_dac = 0;
1026	if ((hw->bus_type == e1000_bus_type_pcix) &&
1027	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1028		pci_using_dac = 1;
1029	} else {
1030		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1031		if (err) {
1032			pr_err("No usable DMA config, aborting\n");
1033			goto err_dma;
1034		}
1035	}
1036
1037	netdev->netdev_ops = &e1000_netdev_ops;
1038	e1000_set_ethtool_ops(netdev);
1039	netdev->watchdog_timeo = 5 * HZ;
1040	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1041
1042	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1043
1044	adapter->bd_number = cards_found;
1045
1046	/* setup the private structure */
1047
1048	err = e1000_sw_init(adapter);
1049	if (err)
1050		goto err_sw_init;
1051
1052	err = -EIO;
1053	if (hw->mac_type == e1000_ce4100) {
1054		hw->ce4100_gbe_mdio_base_virt =
1055					ioremap(pci_resource_start(pdev, BAR_1),
1056						pci_resource_len(pdev, BAR_1));
1057
1058		if (!hw->ce4100_gbe_mdio_base_virt)
1059			goto err_mdio_ioremap;
1060	}
1061
1062	if (hw->mac_type >= e1000_82543) {
1063		netdev->hw_features = NETIF_F_SG |
1064				   NETIF_F_HW_CSUM |
1065				   NETIF_F_HW_VLAN_CTAG_RX;
1066		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1067				   NETIF_F_HW_VLAN_CTAG_FILTER;
1068	}
1069
1070	if ((hw->mac_type >= e1000_82544) &&
1071	   (hw->mac_type != e1000_82547))
1072		netdev->hw_features |= NETIF_F_TSO;
1073
1074	netdev->priv_flags |= IFF_SUPP_NOFCS;
1075
1076	netdev->features |= netdev->hw_features;
1077	netdev->hw_features |= (NETIF_F_RXCSUM |
1078				NETIF_F_RXALL |
1079				NETIF_F_RXFCS);
1080
1081	if (pci_using_dac) {
1082		netdev->features |= NETIF_F_HIGHDMA;
1083		netdev->vlan_features |= NETIF_F_HIGHDMA;
1084	}
1085
1086	netdev->vlan_features |= (NETIF_F_TSO |
1087				  NETIF_F_HW_CSUM |
1088				  NETIF_F_SG);
1089
1090	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1091	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1092	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1093		netdev->priv_flags |= IFF_UNICAST_FLT;
1094
1095	/* MTU range: 46 - 16110 */
1096	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1097	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1098
1099	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1100
1101	/* initialize eeprom parameters */
1102	if (e1000_init_eeprom_params(hw)) {
1103		e_err(probe, "EEPROM initialization failed\n");
1104		goto err_eeprom;
1105	}
1106
1107	/* before reading the EEPROM, reset the controller to
1108	 * put the device in a known good starting state
1109	 */
1110
1111	e1000_reset_hw(hw);
1112
1113	/* make sure the EEPROM is good */
1114	if (e1000_validate_eeprom_checksum(hw) < 0) {
1115		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1116		e1000_dump_eeprom(adapter);
1117		/* set MAC address to all zeroes to invalidate and temporary
1118		 * disable this device for the user. This blocks regular
1119		 * traffic while still permitting ethtool ioctls from reaching
1120		 * the hardware as well as allowing the user to run the
1121		 * interface after manually setting a hw addr using
1122		 * `ip set address`
1123		 */
1124		memset(hw->mac_addr, 0, netdev->addr_len);
1125	} else {
1126		/* copy the MAC address out of the EEPROM */
1127		if (e1000_read_mac_addr(hw))
1128			e_err(probe, "EEPROM Read Error\n");
1129	}
1130	/* don't block initialization here due to bad MAC address */
1131	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1132
1133	if (!is_valid_ether_addr(netdev->dev_addr))
1134		e_err(probe, "Invalid MAC Address\n");
1135
1136
1137	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1138	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1139			  e1000_82547_tx_fifo_stall_task);
1140	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1141	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1142
1143	e1000_check_options(adapter);
1144
1145	/* Initial Wake on LAN setting
1146	 * If APM wake is enabled in the EEPROM,
1147	 * enable the ACPI Magic Packet filter
1148	 */
1149
1150	switch (hw->mac_type) {
1151	case e1000_82542_rev2_0:
1152	case e1000_82542_rev2_1:
1153	case e1000_82543:
1154		break;
1155	case e1000_82544:
1156		e1000_read_eeprom(hw,
1157			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1158		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1159		break;
1160	case e1000_82546:
1161	case e1000_82546_rev_3:
1162		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1163			e1000_read_eeprom(hw,
1164				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1165			break;
1166		}
1167		/* Fall Through */
1168	default:
1169		e1000_read_eeprom(hw,
1170			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1171		break;
1172	}
1173	if (eeprom_data & eeprom_apme_mask)
1174		adapter->eeprom_wol |= E1000_WUFC_MAG;
1175
1176	/* now that we have the eeprom settings, apply the special cases
1177	 * where the eeprom may be wrong or the board simply won't support
1178	 * wake on lan on a particular port
1179	 */
1180	switch (pdev->device) {
1181	case E1000_DEV_ID_82546GB_PCIE:
1182		adapter->eeprom_wol = 0;
1183		break;
1184	case E1000_DEV_ID_82546EB_FIBER:
1185	case E1000_DEV_ID_82546GB_FIBER:
1186		/* Wake events only supported on port A for dual fiber
1187		 * regardless of eeprom setting
1188		 */
1189		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1190			adapter->eeprom_wol = 0;
1191		break;
1192	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1193		/* if quad port adapter, disable WoL on all but port A */
1194		if (global_quad_port_a != 0)
1195			adapter->eeprom_wol = 0;
1196		else
1197			adapter->quad_port_a = true;
1198		/* Reset for multiple quad port adapters */
1199		if (++global_quad_port_a == 4)
1200			global_quad_port_a = 0;
1201		break;
1202	}
1203
1204	/* initialize the wol settings based on the eeprom settings */
1205	adapter->wol = adapter->eeprom_wol;
1206	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1207
1208	/* Auto detect PHY address */
1209	if (hw->mac_type == e1000_ce4100) {
1210		for (i = 0; i < 32; i++) {
1211			hw->phy_addr = i;
1212			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1213
1214			if (tmp != 0 && tmp != 0xFF)
1215				break;
1216		}
1217
1218		if (i >= 32)
1219			goto err_eeprom;
1220	}
1221
1222	/* reset the hardware with the new settings */
1223	e1000_reset(adapter);
1224
1225	strcpy(netdev->name, "eth%d");
1226	err = register_netdev(netdev);
1227	if (err)
1228		goto err_register;
1229
1230	e1000_vlan_filter_on_off(adapter, false);
1231
1232	/* print bus type/speed/width info */
1233	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1234	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1235	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1236		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1237		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1238		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1239	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1240	       netdev->dev_addr);
1241
1242	/* carrier off reporting is important to ethtool even BEFORE open */
1243	netif_carrier_off(netdev);
1244
1245	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1246
1247	cards_found++;
1248	return 0;
1249
1250err_register:
1251err_eeprom:
1252	e1000_phy_hw_reset(hw);
1253
1254	if (hw->flash_address)
1255		iounmap(hw->flash_address);
1256	kfree(adapter->tx_ring);
1257	kfree(adapter->rx_ring);
1258err_dma:
1259err_sw_init:
1260err_mdio_ioremap:
1261	iounmap(hw->ce4100_gbe_mdio_base_virt);
1262	iounmap(hw->hw_addr);
1263err_ioremap:
1264	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1265	free_netdev(netdev);
1266err_alloc_etherdev:
1267	pci_release_selected_regions(pdev, bars);
1268err_pci_reg:
1269	if (!adapter || disable_dev)
1270		pci_disable_device(pdev);
1271	return err;
1272}
1273
1274/**
1275 * e1000_remove - Device Removal Routine
1276 * @pdev: PCI device information struct
1277 *
1278 * e1000_remove is called by the PCI subsystem to alert the driver
1279 * that it should release a PCI device. That could be caused by a
1280 * Hot-Plug event, or because the driver is going to be removed from
1281 * memory.
1282 **/
1283static void e1000_remove(struct pci_dev *pdev)
1284{
1285	struct net_device *netdev = pci_get_drvdata(pdev);
1286	struct e1000_adapter *adapter = netdev_priv(netdev);
1287	struct e1000_hw *hw = &adapter->hw;
1288	bool disable_dev;
1289
1290	e1000_down_and_stop(adapter);
1291	e1000_release_manageability(adapter);
1292
1293	unregister_netdev(netdev);
1294
1295	e1000_phy_hw_reset(hw);
1296
1297	kfree(adapter->tx_ring);
1298	kfree(adapter->rx_ring);
1299
1300	if (hw->mac_type == e1000_ce4100)
1301		iounmap(hw->ce4100_gbe_mdio_base_virt);
1302	iounmap(hw->hw_addr);
1303	if (hw->flash_address)
1304		iounmap(hw->flash_address);
1305	pci_release_selected_regions(pdev, adapter->bars);
1306
1307	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1308	free_netdev(netdev);
1309
1310	if (disable_dev)
1311		pci_disable_device(pdev);
1312}
1313
1314/**
1315 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1316 * @adapter: board private structure to initialize
1317 *
1318 * e1000_sw_init initializes the Adapter private data structure.
1319 * e1000_init_hw_struct MUST be called before this function
1320 **/
1321static int e1000_sw_init(struct e1000_adapter *adapter)
1322{
1323	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1324
1325	adapter->num_tx_queues = 1;
1326	adapter->num_rx_queues = 1;
1327
1328	if (e1000_alloc_queues(adapter)) {
1329		e_err(probe, "Unable to allocate memory for queues\n");
1330		return -ENOMEM;
1331	}
1332
1333	/* Explicitly disable IRQ since the NIC can be in any state. */
1334	e1000_irq_disable(adapter);
1335
1336	spin_lock_init(&adapter->stats_lock);
1337
1338	set_bit(__E1000_DOWN, &adapter->flags);
1339
1340	return 0;
1341}
1342
1343/**
1344 * e1000_alloc_queues - Allocate memory for all rings
1345 * @adapter: board private structure to initialize
1346 *
1347 * We allocate one ring per queue at run-time since we don't know the
1348 * number of queues at compile-time.
1349 **/
1350static int e1000_alloc_queues(struct e1000_adapter *adapter)
1351{
1352	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1353				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1354	if (!adapter->tx_ring)
1355		return -ENOMEM;
1356
1357	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1358				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1359	if (!adapter->rx_ring) {
1360		kfree(adapter->tx_ring);
1361		return -ENOMEM;
1362	}
1363
1364	return E1000_SUCCESS;
1365}
1366
1367/**
1368 * e1000_open - Called when a network interface is made active
1369 * @netdev: network interface device structure
1370 *
1371 * Returns 0 on success, negative value on failure
1372 *
1373 * The open entry point is called when a network interface is made
1374 * active by the system (IFF_UP).  At this point all resources needed
1375 * for transmit and receive operations are allocated, the interrupt
1376 * handler is registered with the OS, the watchdog task is started,
1377 * and the stack is notified that the interface is ready.
1378 **/
1379int e1000_open(struct net_device *netdev)
1380{
1381	struct e1000_adapter *adapter = netdev_priv(netdev);
1382	struct e1000_hw *hw = &adapter->hw;
1383	int err;
1384
1385	/* disallow open during test */
1386	if (test_bit(__E1000_TESTING, &adapter->flags))
1387		return -EBUSY;
1388
1389	netif_carrier_off(netdev);
1390
1391	/* allocate transmit descriptors */
1392	err = e1000_setup_all_tx_resources(adapter);
1393	if (err)
1394		goto err_setup_tx;
1395
1396	/* allocate receive descriptors */
1397	err = e1000_setup_all_rx_resources(adapter);
1398	if (err)
1399		goto err_setup_rx;
1400
1401	e1000_power_up_phy(adapter);
1402
1403	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1404	if ((hw->mng_cookie.status &
1405			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1406		e1000_update_mng_vlan(adapter);
1407	}
1408
1409	/* before we allocate an interrupt, we must be ready to handle it.
1410	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1411	 * as soon as we call pci_request_irq, so we have to setup our
1412	 * clean_rx handler before we do so.
1413	 */
1414	e1000_configure(adapter);
1415
1416	err = e1000_request_irq(adapter);
1417	if (err)
1418		goto err_req_irq;
1419
1420	/* From here on the code is the same as e1000_up() */
1421	clear_bit(__E1000_DOWN, &adapter->flags);
1422
 
1423	napi_enable(&adapter->napi);
 
 
1424
1425	e1000_irq_enable(adapter);
1426
1427	netif_start_queue(netdev);
1428
1429	/* fire a link status change interrupt to start the watchdog */
1430	ew32(ICS, E1000_ICS_LSC);
1431
1432	return E1000_SUCCESS;
1433
1434err_req_irq:
1435	e1000_power_down_phy(adapter);
1436	e1000_free_all_rx_resources(adapter);
1437err_setup_rx:
1438	e1000_free_all_tx_resources(adapter);
1439err_setup_tx:
1440	e1000_reset(adapter);
1441
1442	return err;
1443}
1444
1445/**
1446 * e1000_close - Disables a network interface
1447 * @netdev: network interface device structure
1448 *
1449 * Returns 0, this is not allowed to fail
1450 *
1451 * The close entry point is called when an interface is de-activated
1452 * by the OS.  The hardware is still under the drivers control, but
1453 * needs to be disabled.  A global MAC reset is issued to stop the
1454 * hardware, and all transmit and receive resources are freed.
1455 **/
1456int e1000_close(struct net_device *netdev)
1457{
1458	struct e1000_adapter *adapter = netdev_priv(netdev);
1459	struct e1000_hw *hw = &adapter->hw;
1460	int count = E1000_CHECK_RESET_COUNT;
1461
1462	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1463		usleep_range(10000, 20000);
1464
1465	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 
 
 
 
 
1466	e1000_down(adapter);
1467	e1000_power_down_phy(adapter);
1468	e1000_free_irq(adapter);
1469
1470	e1000_free_all_tx_resources(adapter);
1471	e1000_free_all_rx_resources(adapter);
1472
1473	/* kill manageability vlan ID if supported, but not if a vlan with
1474	 * the same ID is registered on the host OS (let 8021q kill it)
1475	 */
1476	if ((hw->mng_cookie.status &
1477	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1478	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1479		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1480				       adapter->mng_vlan_id);
1481	}
1482
1483	return 0;
1484}
1485
1486/**
1487 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1488 * @adapter: address of board private structure
1489 * @start: address of beginning of memory
1490 * @len: length of memory
1491 **/
1492static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1493				  unsigned long len)
1494{
1495	struct e1000_hw *hw = &adapter->hw;
1496	unsigned long begin = (unsigned long)start;
1497	unsigned long end = begin + len;
1498
1499	/* First rev 82545 and 82546 need to not allow any memory
1500	 * write location to cross 64k boundary due to errata 23
1501	 */
1502	if (hw->mac_type == e1000_82545 ||
1503	    hw->mac_type == e1000_ce4100 ||
1504	    hw->mac_type == e1000_82546) {
1505		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1506	}
1507
1508	return true;
1509}
1510
1511/**
1512 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1513 * @adapter: board private structure
1514 * @txdr:    tx descriptor ring (for a specific queue) to setup
1515 *
1516 * Return 0 on success, negative on failure
1517 **/
1518static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1519				    struct e1000_tx_ring *txdr)
1520{
1521	struct pci_dev *pdev = adapter->pdev;
1522	int size;
1523
1524	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1525	txdr->buffer_info = vzalloc(size);
1526	if (!txdr->buffer_info)
1527		return -ENOMEM;
1528
1529	/* round up to nearest 4K */
1530
1531	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1532	txdr->size = ALIGN(txdr->size, 4096);
1533
1534	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1535					GFP_KERNEL);
1536	if (!txdr->desc) {
1537setup_tx_desc_die:
1538		vfree(txdr->buffer_info);
1539		return -ENOMEM;
1540	}
1541
1542	/* Fix for errata 23, can't cross 64kB boundary */
1543	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1544		void *olddesc = txdr->desc;
1545		dma_addr_t olddma = txdr->dma;
1546		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1547		      txdr->size, txdr->desc);
1548		/* Try again, without freeing the previous */
1549		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1550						&txdr->dma, GFP_KERNEL);
1551		/* Failed allocation, critical failure */
1552		if (!txdr->desc) {
1553			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1554					  olddma);
1555			goto setup_tx_desc_die;
1556		}
1557
1558		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1559			/* give up */
1560			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1561					  txdr->dma);
1562			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1563					  olddma);
1564			e_err(probe, "Unable to allocate aligned memory "
1565			      "for the transmit descriptor ring\n");
1566			vfree(txdr->buffer_info);
1567			return -ENOMEM;
1568		} else {
1569			/* Free old allocation, new allocation was successful */
1570			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1571					  olddma);
1572		}
1573	}
1574	memset(txdr->desc, 0, txdr->size);
1575
1576	txdr->next_to_use = 0;
1577	txdr->next_to_clean = 0;
1578
1579	return 0;
1580}
1581
1582/**
1583 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1584 * 				  (Descriptors) for all queues
1585 * @adapter: board private structure
1586 *
1587 * Return 0 on success, negative on failure
1588 **/
1589int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1590{
1591	int i, err = 0;
1592
1593	for (i = 0; i < adapter->num_tx_queues; i++) {
1594		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1595		if (err) {
1596			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1597			for (i-- ; i >= 0; i--)
1598				e1000_free_tx_resources(adapter,
1599							&adapter->tx_ring[i]);
1600			break;
1601		}
1602	}
1603
1604	return err;
1605}
1606
1607/**
1608 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1609 * @adapter: board private structure
1610 *
1611 * Configure the Tx unit of the MAC after a reset.
1612 **/
1613static void e1000_configure_tx(struct e1000_adapter *adapter)
1614{
1615	u64 tdba;
1616	struct e1000_hw *hw = &adapter->hw;
1617	u32 tdlen, tctl, tipg;
1618	u32 ipgr1, ipgr2;
1619
1620	/* Setup the HW Tx Head and Tail descriptor pointers */
1621
1622	switch (adapter->num_tx_queues) {
1623	case 1:
1624	default:
1625		tdba = adapter->tx_ring[0].dma;
1626		tdlen = adapter->tx_ring[0].count *
1627			sizeof(struct e1000_tx_desc);
1628		ew32(TDLEN, tdlen);
1629		ew32(TDBAH, (tdba >> 32));
1630		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1631		ew32(TDT, 0);
1632		ew32(TDH, 0);
1633		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1634					   E1000_TDH : E1000_82542_TDH);
1635		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1636					   E1000_TDT : E1000_82542_TDT);
1637		break;
1638	}
1639
1640	/* Set the default values for the Tx Inter Packet Gap timer */
1641	if ((hw->media_type == e1000_media_type_fiber ||
1642	     hw->media_type == e1000_media_type_internal_serdes))
1643		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1644	else
1645		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1646
1647	switch (hw->mac_type) {
1648	case e1000_82542_rev2_0:
1649	case e1000_82542_rev2_1:
1650		tipg = DEFAULT_82542_TIPG_IPGT;
1651		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1652		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1653		break;
1654	default:
1655		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1656		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1657		break;
1658	}
1659	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1660	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1661	ew32(TIPG, tipg);
1662
1663	/* Set the Tx Interrupt Delay register */
1664
1665	ew32(TIDV, adapter->tx_int_delay);
1666	if (hw->mac_type >= e1000_82540)
1667		ew32(TADV, adapter->tx_abs_int_delay);
1668
1669	/* Program the Transmit Control Register */
1670
1671	tctl = er32(TCTL);
1672	tctl &= ~E1000_TCTL_CT;
1673	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1674		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1675
1676	e1000_config_collision_dist(hw);
1677
1678	/* Setup Transmit Descriptor Settings for eop descriptor */
1679	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1680
1681	/* only set IDE if we are delaying interrupts using the timers */
1682	if (adapter->tx_int_delay)
1683		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1684
1685	if (hw->mac_type < e1000_82543)
1686		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1687	else
1688		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1689
1690	/* Cache if we're 82544 running in PCI-X because we'll
1691	 * need this to apply a workaround later in the send path.
1692	 */
1693	if (hw->mac_type == e1000_82544 &&
1694	    hw->bus_type == e1000_bus_type_pcix)
1695		adapter->pcix_82544 = true;
1696
1697	ew32(TCTL, tctl);
1698
1699}
1700
1701/**
1702 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1703 * @adapter: board private structure
1704 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1705 *
1706 * Returns 0 on success, negative on failure
1707 **/
1708static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1709				    struct e1000_rx_ring *rxdr)
1710{
1711	struct pci_dev *pdev = adapter->pdev;
1712	int size, desc_len;
1713
1714	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1715	rxdr->buffer_info = vzalloc(size);
1716	if (!rxdr->buffer_info)
1717		return -ENOMEM;
1718
1719	desc_len = sizeof(struct e1000_rx_desc);
1720
1721	/* Round up to nearest 4K */
1722
1723	rxdr->size = rxdr->count * desc_len;
1724	rxdr->size = ALIGN(rxdr->size, 4096);
1725
1726	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1727					GFP_KERNEL);
1728	if (!rxdr->desc) {
1729setup_rx_desc_die:
1730		vfree(rxdr->buffer_info);
1731		return -ENOMEM;
1732	}
1733
1734	/* Fix for errata 23, can't cross 64kB boundary */
1735	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1736		void *olddesc = rxdr->desc;
1737		dma_addr_t olddma = rxdr->dma;
1738		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1739		      rxdr->size, rxdr->desc);
1740		/* Try again, without freeing the previous */
1741		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1742						&rxdr->dma, GFP_KERNEL);
1743		/* Failed allocation, critical failure */
1744		if (!rxdr->desc) {
1745			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746					  olddma);
1747			goto setup_rx_desc_die;
1748		}
1749
1750		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1751			/* give up */
1752			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1753					  rxdr->dma);
1754			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1755					  olddma);
1756			e_err(probe, "Unable to allocate aligned memory for "
1757			      "the Rx descriptor ring\n");
1758			goto setup_rx_desc_die;
1759		} else {
1760			/* Free old allocation, new allocation was successful */
1761			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1762					  olddma);
1763		}
1764	}
1765	memset(rxdr->desc, 0, rxdr->size);
1766
1767	rxdr->next_to_clean = 0;
1768	rxdr->next_to_use = 0;
1769	rxdr->rx_skb_top = NULL;
1770
1771	return 0;
1772}
1773
1774/**
1775 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1776 * 				  (Descriptors) for all queues
1777 * @adapter: board private structure
1778 *
1779 * Return 0 on success, negative on failure
1780 **/
1781int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1782{
1783	int i, err = 0;
1784
1785	for (i = 0; i < adapter->num_rx_queues; i++) {
1786		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1787		if (err) {
1788			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1789			for (i-- ; i >= 0; i--)
1790				e1000_free_rx_resources(adapter,
1791							&adapter->rx_ring[i]);
1792			break;
1793		}
1794	}
1795
1796	return err;
1797}
1798
1799/**
1800 * e1000_setup_rctl - configure the receive control registers
1801 * @adapter: Board private structure
1802 **/
1803static void e1000_setup_rctl(struct e1000_adapter *adapter)
1804{
1805	struct e1000_hw *hw = &adapter->hw;
1806	u32 rctl;
1807
1808	rctl = er32(RCTL);
1809
1810	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1811
1812	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1813		E1000_RCTL_RDMTS_HALF |
1814		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1815
1816	if (hw->tbi_compatibility_on == 1)
1817		rctl |= E1000_RCTL_SBP;
1818	else
1819		rctl &= ~E1000_RCTL_SBP;
1820
1821	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1822		rctl &= ~E1000_RCTL_LPE;
1823	else
1824		rctl |= E1000_RCTL_LPE;
1825
1826	/* Setup buffer sizes */
1827	rctl &= ~E1000_RCTL_SZ_4096;
1828	rctl |= E1000_RCTL_BSEX;
1829	switch (adapter->rx_buffer_len) {
1830	case E1000_RXBUFFER_2048:
1831	default:
1832		rctl |= E1000_RCTL_SZ_2048;
1833		rctl &= ~E1000_RCTL_BSEX;
1834		break;
1835	case E1000_RXBUFFER_4096:
1836		rctl |= E1000_RCTL_SZ_4096;
1837		break;
1838	case E1000_RXBUFFER_8192:
1839		rctl |= E1000_RCTL_SZ_8192;
1840		break;
1841	case E1000_RXBUFFER_16384:
1842		rctl |= E1000_RCTL_SZ_16384;
1843		break;
1844	}
1845
1846	/* This is useful for sniffing bad packets. */
1847	if (adapter->netdev->features & NETIF_F_RXALL) {
1848		/* UPE and MPE will be handled by normal PROMISC logic
1849		 * in e1000e_set_rx_mode
1850		 */
1851		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1852			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1853			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1854
1855		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1856			  E1000_RCTL_DPF | /* Allow filtered pause */
1857			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1858		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1859		 * and that breaks VLANs.
1860		 */
1861	}
1862
1863	ew32(RCTL, rctl);
1864}
1865
1866/**
1867 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1868 * @adapter: board private structure
1869 *
1870 * Configure the Rx unit of the MAC after a reset.
1871 **/
1872static void e1000_configure_rx(struct e1000_adapter *adapter)
1873{
1874	u64 rdba;
1875	struct e1000_hw *hw = &adapter->hw;
1876	u32 rdlen, rctl, rxcsum;
1877
1878	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1879		rdlen = adapter->rx_ring[0].count *
1880			sizeof(struct e1000_rx_desc);
1881		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1882		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1883	} else {
1884		rdlen = adapter->rx_ring[0].count *
1885			sizeof(struct e1000_rx_desc);
1886		adapter->clean_rx = e1000_clean_rx_irq;
1887		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1888	}
1889
1890	/* disable receives while setting up the descriptors */
1891	rctl = er32(RCTL);
1892	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1893
1894	/* set the Receive Delay Timer Register */
1895	ew32(RDTR, adapter->rx_int_delay);
1896
1897	if (hw->mac_type >= e1000_82540) {
1898		ew32(RADV, adapter->rx_abs_int_delay);
1899		if (adapter->itr_setting != 0)
1900			ew32(ITR, 1000000000 / (adapter->itr * 256));
1901	}
1902
1903	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1904	 * the Base and Length of the Rx Descriptor Ring
1905	 */
1906	switch (adapter->num_rx_queues) {
1907	case 1:
1908	default:
1909		rdba = adapter->rx_ring[0].dma;
1910		ew32(RDLEN, rdlen);
1911		ew32(RDBAH, (rdba >> 32));
1912		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1913		ew32(RDT, 0);
1914		ew32(RDH, 0);
1915		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1916					   E1000_RDH : E1000_82542_RDH);
1917		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1918					   E1000_RDT : E1000_82542_RDT);
1919		break;
1920	}
1921
1922	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1923	if (hw->mac_type >= e1000_82543) {
1924		rxcsum = er32(RXCSUM);
1925		if (adapter->rx_csum)
1926			rxcsum |= E1000_RXCSUM_TUOFL;
1927		else
1928			/* don't need to clear IPPCSE as it defaults to 0 */
1929			rxcsum &= ~E1000_RXCSUM_TUOFL;
1930		ew32(RXCSUM, rxcsum);
1931	}
1932
1933	/* Enable Receives */
1934	ew32(RCTL, rctl | E1000_RCTL_EN);
1935}
1936
1937/**
1938 * e1000_free_tx_resources - Free Tx Resources per Queue
1939 * @adapter: board private structure
1940 * @tx_ring: Tx descriptor ring for a specific queue
1941 *
1942 * Free all transmit software resources
1943 **/
1944static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1945				    struct e1000_tx_ring *tx_ring)
1946{
1947	struct pci_dev *pdev = adapter->pdev;
1948
1949	e1000_clean_tx_ring(adapter, tx_ring);
1950
1951	vfree(tx_ring->buffer_info);
1952	tx_ring->buffer_info = NULL;
1953
1954	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1955			  tx_ring->dma);
1956
1957	tx_ring->desc = NULL;
1958}
1959
1960/**
1961 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1962 * @adapter: board private structure
1963 *
1964 * Free all transmit software resources
1965 **/
1966void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1967{
1968	int i;
1969
1970	for (i = 0; i < adapter->num_tx_queues; i++)
1971		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1972}
1973
1974static void
1975e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1976				 struct e1000_tx_buffer *buffer_info)
 
1977{
1978	if (buffer_info->dma) {
1979		if (buffer_info->mapped_as_page)
1980			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1981				       buffer_info->length, DMA_TO_DEVICE);
1982		else
1983			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1984					 buffer_info->length,
1985					 DMA_TO_DEVICE);
1986		buffer_info->dma = 0;
1987	}
1988	if (buffer_info->skb) {
1989		dev_kfree_skb_any(buffer_info->skb);
1990		buffer_info->skb = NULL;
1991	}
1992	buffer_info->time_stamp = 0;
1993	/* buffer_info must be completely set up in the transmit path */
1994}
1995
1996/**
1997 * e1000_clean_tx_ring - Free Tx Buffers
1998 * @adapter: board private structure
1999 * @tx_ring: ring to be cleaned
2000 **/
2001static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2002				struct e1000_tx_ring *tx_ring)
2003{
2004	struct e1000_hw *hw = &adapter->hw;
2005	struct e1000_tx_buffer *buffer_info;
2006	unsigned long size;
2007	unsigned int i;
2008
2009	/* Free all the Tx ring sk_buffs */
2010
2011	for (i = 0; i < tx_ring->count; i++) {
2012		buffer_info = &tx_ring->buffer_info[i];
2013		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2014	}
2015
2016	netdev_reset_queue(adapter->netdev);
2017	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2018	memset(tx_ring->buffer_info, 0, size);
2019
2020	/* Zero out the descriptor ring */
2021
2022	memset(tx_ring->desc, 0, tx_ring->size);
2023
2024	tx_ring->next_to_use = 0;
2025	tx_ring->next_to_clean = 0;
2026	tx_ring->last_tx_tso = false;
2027
2028	writel(0, hw->hw_addr + tx_ring->tdh);
2029	writel(0, hw->hw_addr + tx_ring->tdt);
2030}
2031
2032/**
2033 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2034 * @adapter: board private structure
2035 **/
2036static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2037{
2038	int i;
2039
2040	for (i = 0; i < adapter->num_tx_queues; i++)
2041		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2042}
2043
2044/**
2045 * e1000_free_rx_resources - Free Rx Resources
2046 * @adapter: board private structure
2047 * @rx_ring: ring to clean the resources from
2048 *
2049 * Free all receive software resources
2050 **/
2051static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2052				    struct e1000_rx_ring *rx_ring)
2053{
2054	struct pci_dev *pdev = adapter->pdev;
2055
2056	e1000_clean_rx_ring(adapter, rx_ring);
2057
2058	vfree(rx_ring->buffer_info);
2059	rx_ring->buffer_info = NULL;
2060
2061	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2062			  rx_ring->dma);
2063
2064	rx_ring->desc = NULL;
2065}
2066
2067/**
2068 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2069 * @adapter: board private structure
2070 *
2071 * Free all receive software resources
2072 **/
2073void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2074{
2075	int i;
2076
2077	for (i = 0; i < adapter->num_rx_queues; i++)
2078		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2079}
2080
2081#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2082static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2083{
2084	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2085		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2086}
2087
2088static void *e1000_alloc_frag(const struct e1000_adapter *a)
2089{
2090	unsigned int len = e1000_frag_len(a);
2091	u8 *data = netdev_alloc_frag(len);
2092
2093	if (likely(data))
2094		data += E1000_HEADROOM;
2095	return data;
2096}
2097
2098/**
2099 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2100 * @adapter: board private structure
2101 * @rx_ring: ring to free buffers from
2102 **/
2103static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2104				struct e1000_rx_ring *rx_ring)
2105{
2106	struct e1000_hw *hw = &adapter->hw;
2107	struct e1000_rx_buffer *buffer_info;
2108	struct pci_dev *pdev = adapter->pdev;
2109	unsigned long size;
2110	unsigned int i;
2111
2112	/* Free all the Rx netfrags */
2113	for (i = 0; i < rx_ring->count; i++) {
2114		buffer_info = &rx_ring->buffer_info[i];
2115		if (adapter->clean_rx == e1000_clean_rx_irq) {
2116			if (buffer_info->dma)
2117				dma_unmap_single(&pdev->dev, buffer_info->dma,
2118						 adapter->rx_buffer_len,
2119						 DMA_FROM_DEVICE);
2120			if (buffer_info->rxbuf.data) {
2121				skb_free_frag(buffer_info->rxbuf.data);
2122				buffer_info->rxbuf.data = NULL;
2123			}
2124		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2125			if (buffer_info->dma)
2126				dma_unmap_page(&pdev->dev, buffer_info->dma,
2127					       adapter->rx_buffer_len,
2128					       DMA_FROM_DEVICE);
2129			if (buffer_info->rxbuf.page) {
2130				put_page(buffer_info->rxbuf.page);
2131				buffer_info->rxbuf.page = NULL;
2132			}
2133		}
2134
2135		buffer_info->dma = 0;
2136	}
2137
2138	/* there also may be some cached data from a chained receive */
2139	napi_free_frags(&adapter->napi);
2140	rx_ring->rx_skb_top = NULL;
2141
2142	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2143	memset(rx_ring->buffer_info, 0, size);
2144
2145	/* Zero out the descriptor ring */
2146	memset(rx_ring->desc, 0, rx_ring->size);
2147
2148	rx_ring->next_to_clean = 0;
2149	rx_ring->next_to_use = 0;
2150
2151	writel(0, hw->hw_addr + rx_ring->rdh);
2152	writel(0, hw->hw_addr + rx_ring->rdt);
2153}
2154
2155/**
2156 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2157 * @adapter: board private structure
2158 **/
2159static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2160{
2161	int i;
2162
2163	for (i = 0; i < adapter->num_rx_queues; i++)
2164		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2165}
2166
2167/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2168 * and memory write and invalidate disabled for certain operations
2169 */
2170static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2171{
2172	struct e1000_hw *hw = &adapter->hw;
2173	struct net_device *netdev = adapter->netdev;
2174	u32 rctl;
2175
2176	e1000_pci_clear_mwi(hw);
2177
2178	rctl = er32(RCTL);
2179	rctl |= E1000_RCTL_RST;
2180	ew32(RCTL, rctl);
2181	E1000_WRITE_FLUSH();
2182	mdelay(5);
2183
2184	if (netif_running(netdev))
2185		e1000_clean_all_rx_rings(adapter);
2186}
2187
2188static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2189{
2190	struct e1000_hw *hw = &adapter->hw;
2191	struct net_device *netdev = adapter->netdev;
2192	u32 rctl;
2193
2194	rctl = er32(RCTL);
2195	rctl &= ~E1000_RCTL_RST;
2196	ew32(RCTL, rctl);
2197	E1000_WRITE_FLUSH();
2198	mdelay(5);
2199
2200	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2201		e1000_pci_set_mwi(hw);
2202
2203	if (netif_running(netdev)) {
2204		/* No need to loop, because 82542 supports only 1 queue */
2205		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2206		e1000_configure_rx(adapter);
2207		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2208	}
2209}
2210
2211/**
2212 * e1000_set_mac - Change the Ethernet Address of the NIC
2213 * @netdev: network interface device structure
2214 * @p: pointer to an address structure
2215 *
2216 * Returns 0 on success, negative on failure
2217 **/
2218static int e1000_set_mac(struct net_device *netdev, void *p)
2219{
2220	struct e1000_adapter *adapter = netdev_priv(netdev);
2221	struct e1000_hw *hw = &adapter->hw;
2222	struct sockaddr *addr = p;
2223
2224	if (!is_valid_ether_addr(addr->sa_data))
2225		return -EADDRNOTAVAIL;
2226
2227	/* 82542 2.0 needs to be in reset to write receive address registers */
2228
2229	if (hw->mac_type == e1000_82542_rev2_0)
2230		e1000_enter_82542_rst(adapter);
2231
2232	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2233	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2234
2235	e1000_rar_set(hw, hw->mac_addr, 0);
2236
2237	if (hw->mac_type == e1000_82542_rev2_0)
2238		e1000_leave_82542_rst(adapter);
2239
2240	return 0;
2241}
2242
2243/**
2244 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2245 * @netdev: network interface device structure
2246 *
2247 * The set_rx_mode entry point is called whenever the unicast or multicast
2248 * address lists or the network interface flags are updated. This routine is
2249 * responsible for configuring the hardware for proper unicast, multicast,
2250 * promiscuous mode, and all-multi behavior.
2251 **/
2252static void e1000_set_rx_mode(struct net_device *netdev)
2253{
2254	struct e1000_adapter *adapter = netdev_priv(netdev);
2255	struct e1000_hw *hw = &adapter->hw;
2256	struct netdev_hw_addr *ha;
2257	bool use_uc = false;
2258	u32 rctl;
2259	u32 hash_value;
2260	int i, rar_entries = E1000_RAR_ENTRIES;
2261	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2262	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2263
2264	if (!mcarray)
2265		return;
2266
2267	/* Check for Promiscuous and All Multicast modes */
2268
2269	rctl = er32(RCTL);
2270
2271	if (netdev->flags & IFF_PROMISC) {
2272		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2273		rctl &= ~E1000_RCTL_VFE;
2274	} else {
2275		if (netdev->flags & IFF_ALLMULTI)
2276			rctl |= E1000_RCTL_MPE;
2277		else
2278			rctl &= ~E1000_RCTL_MPE;
2279		/* Enable VLAN filter if there is a VLAN */
2280		if (e1000_vlan_used(adapter))
2281			rctl |= E1000_RCTL_VFE;
2282	}
2283
2284	if (netdev_uc_count(netdev) > rar_entries - 1) {
2285		rctl |= E1000_RCTL_UPE;
2286	} else if (!(netdev->flags & IFF_PROMISC)) {
2287		rctl &= ~E1000_RCTL_UPE;
2288		use_uc = true;
2289	}
2290
2291	ew32(RCTL, rctl);
2292
2293	/* 82542 2.0 needs to be in reset to write receive address registers */
2294
2295	if (hw->mac_type == e1000_82542_rev2_0)
2296		e1000_enter_82542_rst(adapter);
2297
2298	/* load the first 14 addresses into the exact filters 1-14. Unicast
2299	 * addresses take precedence to avoid disabling unicast filtering
2300	 * when possible.
2301	 *
2302	 * RAR 0 is used for the station MAC address
2303	 * if there are not 14 addresses, go ahead and clear the filters
2304	 */
2305	i = 1;
2306	if (use_uc)
2307		netdev_for_each_uc_addr(ha, netdev) {
2308			if (i == rar_entries)
2309				break;
2310			e1000_rar_set(hw, ha->addr, i++);
2311		}
2312
2313	netdev_for_each_mc_addr(ha, netdev) {
2314		if (i == rar_entries) {
2315			/* load any remaining addresses into the hash table */
2316			u32 hash_reg, hash_bit, mta;
2317			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2318			hash_reg = (hash_value >> 5) & 0x7F;
2319			hash_bit = hash_value & 0x1F;
2320			mta = (1 << hash_bit);
2321			mcarray[hash_reg] |= mta;
2322		} else {
2323			e1000_rar_set(hw, ha->addr, i++);
2324		}
2325	}
2326
2327	for (; i < rar_entries; i++) {
2328		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2329		E1000_WRITE_FLUSH();
2330		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2331		E1000_WRITE_FLUSH();
2332	}
2333
2334	/* write the hash table completely, write from bottom to avoid
2335	 * both stupid write combining chipsets, and flushing each write
2336	 */
2337	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2338		/* If we are on an 82544 has an errata where writing odd
2339		 * offsets overwrites the previous even offset, but writing
2340		 * backwards over the range solves the issue by always
2341		 * writing the odd offset first
2342		 */
2343		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2344	}
2345	E1000_WRITE_FLUSH();
2346
2347	if (hw->mac_type == e1000_82542_rev2_0)
2348		e1000_leave_82542_rst(adapter);
2349
2350	kfree(mcarray);
2351}
2352
2353/**
2354 * e1000_update_phy_info_task - get phy info
2355 * @work: work struct contained inside adapter struct
2356 *
2357 * Need to wait a few seconds after link up to get diagnostic information from
2358 * the phy
2359 */
2360static void e1000_update_phy_info_task(struct work_struct *work)
2361{
2362	struct e1000_adapter *adapter = container_of(work,
2363						     struct e1000_adapter,
2364						     phy_info_task.work);
2365
2366	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2367}
2368
2369/**
2370 * e1000_82547_tx_fifo_stall_task - task to complete work
2371 * @work: work struct contained inside adapter struct
2372 **/
2373static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2374{
2375	struct e1000_adapter *adapter = container_of(work,
2376						     struct e1000_adapter,
2377						     fifo_stall_task.work);
2378	struct e1000_hw *hw = &adapter->hw;
2379	struct net_device *netdev = adapter->netdev;
2380	u32 tctl;
2381
2382	if (atomic_read(&adapter->tx_fifo_stall)) {
2383		if ((er32(TDT) == er32(TDH)) &&
2384		   (er32(TDFT) == er32(TDFH)) &&
2385		   (er32(TDFTS) == er32(TDFHS))) {
2386			tctl = er32(TCTL);
2387			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2388			ew32(TDFT, adapter->tx_head_addr);
2389			ew32(TDFH, adapter->tx_head_addr);
2390			ew32(TDFTS, adapter->tx_head_addr);
2391			ew32(TDFHS, adapter->tx_head_addr);
2392			ew32(TCTL, tctl);
2393			E1000_WRITE_FLUSH();
2394
2395			adapter->tx_fifo_head = 0;
2396			atomic_set(&adapter->tx_fifo_stall, 0);
2397			netif_wake_queue(netdev);
2398		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2399			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2400		}
2401	}
2402}
2403
2404bool e1000_has_link(struct e1000_adapter *adapter)
2405{
2406	struct e1000_hw *hw = &adapter->hw;
2407	bool link_active = false;
2408
2409	/* get_link_status is set on LSC (link status) interrupt or rx
2410	 * sequence error interrupt (except on intel ce4100).
2411	 * get_link_status will stay false until the
2412	 * e1000_check_for_link establishes link for copper adapters
2413	 * ONLY
2414	 */
2415	switch (hw->media_type) {
2416	case e1000_media_type_copper:
2417		if (hw->mac_type == e1000_ce4100)
2418			hw->get_link_status = 1;
2419		if (hw->get_link_status) {
2420			e1000_check_for_link(hw);
2421			link_active = !hw->get_link_status;
2422		} else {
2423			link_active = true;
2424		}
2425		break;
2426	case e1000_media_type_fiber:
2427		e1000_check_for_link(hw);
2428		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2429		break;
2430	case e1000_media_type_internal_serdes:
2431		e1000_check_for_link(hw);
2432		link_active = hw->serdes_has_link;
2433		break;
2434	default:
2435		break;
2436	}
2437
2438	return link_active;
2439}
2440
2441/**
2442 * e1000_watchdog - work function
2443 * @work: work struct contained inside adapter struct
2444 **/
2445static void e1000_watchdog(struct work_struct *work)
2446{
2447	struct e1000_adapter *adapter = container_of(work,
2448						     struct e1000_adapter,
2449						     watchdog_task.work);
2450	struct e1000_hw *hw = &adapter->hw;
2451	struct net_device *netdev = adapter->netdev;
2452	struct e1000_tx_ring *txdr = adapter->tx_ring;
2453	u32 link, tctl;
2454
2455	link = e1000_has_link(adapter);
2456	if ((netif_carrier_ok(netdev)) && link)
2457		goto link_up;
2458
2459	if (link) {
2460		if (!netif_carrier_ok(netdev)) {
2461			u32 ctrl;
2462			bool txb2b = true;
2463			/* update snapshot of PHY registers on LSC */
2464			e1000_get_speed_and_duplex(hw,
2465						   &adapter->link_speed,
2466						   &adapter->link_duplex);
2467
2468			ctrl = er32(CTRL);
2469			pr_info("%s NIC Link is Up %d Mbps %s, "
2470				"Flow Control: %s\n",
2471				netdev->name,
2472				adapter->link_speed,
2473				adapter->link_duplex == FULL_DUPLEX ?
2474				"Full Duplex" : "Half Duplex",
2475				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2476				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2477				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2478				E1000_CTRL_TFCE) ? "TX" : "None")));
2479
2480			/* adjust timeout factor according to speed/duplex */
2481			adapter->tx_timeout_factor = 1;
2482			switch (adapter->link_speed) {
2483			case SPEED_10:
2484				txb2b = false;
2485				adapter->tx_timeout_factor = 16;
2486				break;
2487			case SPEED_100:
2488				txb2b = false;
2489				/* maybe add some timeout factor ? */
2490				break;
2491			}
2492
2493			/* enable transmits in the hardware */
2494			tctl = er32(TCTL);
2495			tctl |= E1000_TCTL_EN;
2496			ew32(TCTL, tctl);
2497
2498			netif_carrier_on(netdev);
2499			if (!test_bit(__E1000_DOWN, &adapter->flags))
2500				schedule_delayed_work(&adapter->phy_info_task,
2501						      2 * HZ);
2502			adapter->smartspeed = 0;
2503		}
2504	} else {
2505		if (netif_carrier_ok(netdev)) {
2506			adapter->link_speed = 0;
2507			adapter->link_duplex = 0;
2508			pr_info("%s NIC Link is Down\n",
2509				netdev->name);
2510			netif_carrier_off(netdev);
2511
2512			if (!test_bit(__E1000_DOWN, &adapter->flags))
2513				schedule_delayed_work(&adapter->phy_info_task,
2514						      2 * HZ);
2515		}
2516
2517		e1000_smartspeed(adapter);
2518	}
2519
2520link_up:
2521	e1000_update_stats(adapter);
2522
2523	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2524	adapter->tpt_old = adapter->stats.tpt;
2525	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2526	adapter->colc_old = adapter->stats.colc;
2527
2528	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2529	adapter->gorcl_old = adapter->stats.gorcl;
2530	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2531	adapter->gotcl_old = adapter->stats.gotcl;
2532
2533	e1000_update_adaptive(hw);
2534
2535	if (!netif_carrier_ok(netdev)) {
2536		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2537			/* We've lost link, so the controller stops DMA,
2538			 * but we've got queued Tx work that's never going
2539			 * to get done, so reset controller to flush Tx.
2540			 * (Do the reset outside of interrupt context).
2541			 */
2542			adapter->tx_timeout_count++;
2543			schedule_work(&adapter->reset_task);
2544			/* exit immediately since reset is imminent */
2545			return;
2546		}
2547	}
2548
2549	/* Simple mode for Interrupt Throttle Rate (ITR) */
2550	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2551		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2552		 * Total asymmetrical Tx or Rx gets ITR=8000;
2553		 * everyone else is between 2000-8000.
2554		 */
2555		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2556		u32 dif = (adapter->gotcl > adapter->gorcl ?
2557			    adapter->gotcl - adapter->gorcl :
2558			    adapter->gorcl - adapter->gotcl) / 10000;
2559		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2560
2561		ew32(ITR, 1000000000 / (itr * 256));
2562	}
2563
2564	/* Cause software interrupt to ensure rx ring is cleaned */
2565	ew32(ICS, E1000_ICS_RXDMT0);
2566
2567	/* Force detection of hung controller every watchdog period */
2568	adapter->detect_tx_hung = true;
2569
2570	/* Reschedule the task */
2571	if (!test_bit(__E1000_DOWN, &adapter->flags))
2572		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2573}
2574
2575enum latency_range {
2576	lowest_latency = 0,
2577	low_latency = 1,
2578	bulk_latency = 2,
2579	latency_invalid = 255
2580};
2581
2582/**
2583 * e1000_update_itr - update the dynamic ITR value based on statistics
2584 * @adapter: pointer to adapter
2585 * @itr_setting: current adapter->itr
2586 * @packets: the number of packets during this measurement interval
2587 * @bytes: the number of bytes during this measurement interval
2588 *
2589 *      Stores a new ITR value based on packets and byte
2590 *      counts during the last interrupt.  The advantage of per interrupt
2591 *      computation is faster updates and more accurate ITR for the current
2592 *      traffic pattern.  Constants in this function were computed
2593 *      based on theoretical maximum wire speed and thresholds were set based
2594 *      on testing data as well as attempting to minimize response time
2595 *      while increasing bulk throughput.
2596 *      this functionality is controlled by the InterruptThrottleRate module
2597 *      parameter (see e1000_param.c)
2598 **/
2599static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2600				     u16 itr_setting, int packets, int bytes)
2601{
2602	unsigned int retval = itr_setting;
2603	struct e1000_hw *hw = &adapter->hw;
2604
2605	if (unlikely(hw->mac_type < e1000_82540))
2606		goto update_itr_done;
2607
2608	if (packets == 0)
2609		goto update_itr_done;
2610
2611	switch (itr_setting) {
2612	case lowest_latency:
2613		/* jumbo frames get bulk treatment*/
2614		if (bytes/packets > 8000)
2615			retval = bulk_latency;
2616		else if ((packets < 5) && (bytes > 512))
2617			retval = low_latency;
2618		break;
2619	case low_latency:  /* 50 usec aka 20000 ints/s */
2620		if (bytes > 10000) {
2621			/* jumbo frames need bulk latency setting */
2622			if (bytes/packets > 8000)
2623				retval = bulk_latency;
2624			else if ((packets < 10) || ((bytes/packets) > 1200))
2625				retval = bulk_latency;
2626			else if ((packets > 35))
2627				retval = lowest_latency;
2628		} else if (bytes/packets > 2000)
2629			retval = bulk_latency;
2630		else if (packets <= 2 && bytes < 512)
2631			retval = lowest_latency;
2632		break;
2633	case bulk_latency: /* 250 usec aka 4000 ints/s */
2634		if (bytes > 25000) {
2635			if (packets > 35)
2636				retval = low_latency;
2637		} else if (bytes < 6000) {
2638			retval = low_latency;
2639		}
2640		break;
2641	}
2642
2643update_itr_done:
2644	return retval;
2645}
2646
2647static void e1000_set_itr(struct e1000_adapter *adapter)
2648{
2649	struct e1000_hw *hw = &adapter->hw;
2650	u16 current_itr;
2651	u32 new_itr = adapter->itr;
2652
2653	if (unlikely(hw->mac_type < e1000_82540))
2654		return;
2655
2656	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2657	if (unlikely(adapter->link_speed != SPEED_1000)) {
2658		current_itr = 0;
2659		new_itr = 4000;
2660		goto set_itr_now;
2661	}
2662
2663	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2664					   adapter->total_tx_packets,
2665					   adapter->total_tx_bytes);
2666	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2667	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2668		adapter->tx_itr = low_latency;
2669
2670	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2671					   adapter->total_rx_packets,
2672					   adapter->total_rx_bytes);
2673	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2674	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2675		adapter->rx_itr = low_latency;
2676
2677	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2678
2679	switch (current_itr) {
2680	/* counts and packets in update_itr are dependent on these numbers */
2681	case lowest_latency:
2682		new_itr = 70000;
2683		break;
2684	case low_latency:
2685		new_itr = 20000; /* aka hwitr = ~200 */
2686		break;
2687	case bulk_latency:
2688		new_itr = 4000;
2689		break;
2690	default:
2691		break;
2692	}
2693
2694set_itr_now:
2695	if (new_itr != adapter->itr) {
2696		/* this attempts to bias the interrupt rate towards Bulk
2697		 * by adding intermediate steps when interrupt rate is
2698		 * increasing
2699		 */
2700		new_itr = new_itr > adapter->itr ?
2701			  min(adapter->itr + (new_itr >> 2), new_itr) :
2702			  new_itr;
2703		adapter->itr = new_itr;
2704		ew32(ITR, 1000000000 / (new_itr * 256));
2705	}
2706}
2707
2708#define E1000_TX_FLAGS_CSUM		0x00000001
2709#define E1000_TX_FLAGS_VLAN		0x00000002
2710#define E1000_TX_FLAGS_TSO		0x00000004
2711#define E1000_TX_FLAGS_IPV4		0x00000008
2712#define E1000_TX_FLAGS_NO_FCS		0x00000010
2713#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2714#define E1000_TX_FLAGS_VLAN_SHIFT	16
2715
2716static int e1000_tso(struct e1000_adapter *adapter,
2717		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2718		     __be16 protocol)
2719{
2720	struct e1000_context_desc *context_desc;
2721	struct e1000_tx_buffer *buffer_info;
2722	unsigned int i;
2723	u32 cmd_length = 0;
2724	u16 ipcse = 0, tucse, mss;
2725	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2726
2727	if (skb_is_gso(skb)) {
2728		int err;
2729
2730		err = skb_cow_head(skb, 0);
2731		if (err < 0)
2732			return err;
2733
2734		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2735		mss = skb_shinfo(skb)->gso_size;
2736		if (protocol == htons(ETH_P_IP)) {
2737			struct iphdr *iph = ip_hdr(skb);
2738			iph->tot_len = 0;
2739			iph->check = 0;
2740			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2741								 iph->daddr, 0,
2742								 IPPROTO_TCP,
2743								 0);
2744			cmd_length = E1000_TXD_CMD_IP;
2745			ipcse = skb_transport_offset(skb) - 1;
2746		} else if (skb_is_gso_v6(skb)) {
2747			ipv6_hdr(skb)->payload_len = 0;
2748			tcp_hdr(skb)->check =
2749				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2750						 &ipv6_hdr(skb)->daddr,
2751						 0, IPPROTO_TCP, 0);
2752			ipcse = 0;
2753		}
2754		ipcss = skb_network_offset(skb);
2755		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2756		tucss = skb_transport_offset(skb);
2757		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2758		tucse = 0;
2759
2760		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2761			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2762
2763		i = tx_ring->next_to_use;
2764		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2765		buffer_info = &tx_ring->buffer_info[i];
2766
2767		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2768		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2769		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2770		context_desc->upper_setup.tcp_fields.tucss = tucss;
2771		context_desc->upper_setup.tcp_fields.tucso = tucso;
2772		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2773		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2774		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2775		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2776
2777		buffer_info->time_stamp = jiffies;
2778		buffer_info->next_to_watch = i;
2779
2780		if (++i == tx_ring->count)
2781			i = 0;
2782
2783		tx_ring->next_to_use = i;
2784
2785		return true;
2786	}
2787	return false;
2788}
2789
2790static bool e1000_tx_csum(struct e1000_adapter *adapter,
2791			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2792			  __be16 protocol)
2793{
2794	struct e1000_context_desc *context_desc;
2795	struct e1000_tx_buffer *buffer_info;
2796	unsigned int i;
2797	u8 css;
2798	u32 cmd_len = E1000_TXD_CMD_DEXT;
2799
2800	if (skb->ip_summed != CHECKSUM_PARTIAL)
2801		return false;
2802
2803	switch (protocol) {
2804	case cpu_to_be16(ETH_P_IP):
2805		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2806			cmd_len |= E1000_TXD_CMD_TCP;
2807		break;
2808	case cpu_to_be16(ETH_P_IPV6):
2809		/* XXX not handling all IPV6 headers */
2810		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2811			cmd_len |= E1000_TXD_CMD_TCP;
2812		break;
2813	default:
2814		if (unlikely(net_ratelimit()))
2815			e_warn(drv, "checksum_partial proto=%x!\n",
2816			       skb->protocol);
2817		break;
2818	}
2819
2820	css = skb_checksum_start_offset(skb);
2821
2822	i = tx_ring->next_to_use;
2823	buffer_info = &tx_ring->buffer_info[i];
2824	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2825
2826	context_desc->lower_setup.ip_config = 0;
2827	context_desc->upper_setup.tcp_fields.tucss = css;
2828	context_desc->upper_setup.tcp_fields.tucso =
2829		css + skb->csum_offset;
2830	context_desc->upper_setup.tcp_fields.tucse = 0;
2831	context_desc->tcp_seg_setup.data = 0;
2832	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2833
2834	buffer_info->time_stamp = jiffies;
2835	buffer_info->next_to_watch = i;
2836
2837	if (unlikely(++i == tx_ring->count))
2838		i = 0;
2839
2840	tx_ring->next_to_use = i;
2841
2842	return true;
2843}
2844
2845#define E1000_MAX_TXD_PWR	12
2846#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2847
2848static int e1000_tx_map(struct e1000_adapter *adapter,
2849			struct e1000_tx_ring *tx_ring,
2850			struct sk_buff *skb, unsigned int first,
2851			unsigned int max_per_txd, unsigned int nr_frags,
2852			unsigned int mss)
2853{
2854	struct e1000_hw *hw = &adapter->hw;
2855	struct pci_dev *pdev = adapter->pdev;
2856	struct e1000_tx_buffer *buffer_info;
2857	unsigned int len = skb_headlen(skb);
2858	unsigned int offset = 0, size, count = 0, i;
2859	unsigned int f, bytecount, segs;
2860
2861	i = tx_ring->next_to_use;
2862
2863	while (len) {
2864		buffer_info = &tx_ring->buffer_info[i];
2865		size = min(len, max_per_txd);
2866		/* Workaround for Controller erratum --
2867		 * descriptor for non-tso packet in a linear SKB that follows a
2868		 * tso gets written back prematurely before the data is fully
2869		 * DMA'd to the controller
2870		 */
2871		if (!skb->data_len && tx_ring->last_tx_tso &&
2872		    !skb_is_gso(skb)) {
2873			tx_ring->last_tx_tso = false;
2874			size -= 4;
2875		}
2876
2877		/* Workaround for premature desc write-backs
2878		 * in TSO mode.  Append 4-byte sentinel desc
2879		 */
2880		if (unlikely(mss && !nr_frags && size == len && size > 8))
2881			size -= 4;
2882		/* work-around for errata 10 and it applies
2883		 * to all controllers in PCI-X mode
2884		 * The fix is to make sure that the first descriptor of a
2885		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2886		 */
2887		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2888			     (size > 2015) && count == 0))
2889			size = 2015;
2890
2891		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2892		 * terminating buffers within evenly-aligned dwords.
2893		 */
2894		if (unlikely(adapter->pcix_82544 &&
2895		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2896		   size > 4))
2897			size -= 4;
2898
2899		buffer_info->length = size;
2900		/* set time_stamp *before* dma to help avoid a possible race */
2901		buffer_info->time_stamp = jiffies;
2902		buffer_info->mapped_as_page = false;
2903		buffer_info->dma = dma_map_single(&pdev->dev,
2904						  skb->data + offset,
2905						  size, DMA_TO_DEVICE);
2906		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2907			goto dma_error;
2908		buffer_info->next_to_watch = i;
2909
2910		len -= size;
2911		offset += size;
2912		count++;
2913		if (len) {
2914			i++;
2915			if (unlikely(i == tx_ring->count))
2916				i = 0;
2917		}
2918	}
2919
2920	for (f = 0; f < nr_frags; f++) {
2921		const struct skb_frag_struct *frag;
2922
2923		frag = &skb_shinfo(skb)->frags[f];
2924		len = skb_frag_size(frag);
2925		offset = 0;
2926
2927		while (len) {
2928			unsigned long bufend;
2929			i++;
2930			if (unlikely(i == tx_ring->count))
2931				i = 0;
2932
2933			buffer_info = &tx_ring->buffer_info[i];
2934			size = min(len, max_per_txd);
2935			/* Workaround for premature desc write-backs
2936			 * in TSO mode.  Append 4-byte sentinel desc
2937			 */
2938			if (unlikely(mss && f == (nr_frags-1) &&
2939			    size == len && size > 8))
2940				size -= 4;
2941			/* Workaround for potential 82544 hang in PCI-X.
2942			 * Avoid terminating buffers within evenly-aligned
2943			 * dwords.
2944			 */
2945			bufend = (unsigned long)
2946				page_to_phys(skb_frag_page(frag));
2947			bufend += offset + size - 1;
2948			if (unlikely(adapter->pcix_82544 &&
2949				     !(bufend & 4) &&
2950				     size > 4))
2951				size -= 4;
2952
2953			buffer_info->length = size;
2954			buffer_info->time_stamp = jiffies;
2955			buffer_info->mapped_as_page = true;
2956			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2957						offset, size, DMA_TO_DEVICE);
2958			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2959				goto dma_error;
2960			buffer_info->next_to_watch = i;
2961
2962			len -= size;
2963			offset += size;
2964			count++;
2965		}
2966	}
2967
2968	segs = skb_shinfo(skb)->gso_segs ?: 1;
2969	/* multiply data chunks by size of headers */
2970	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2971
2972	tx_ring->buffer_info[i].skb = skb;
2973	tx_ring->buffer_info[i].segs = segs;
2974	tx_ring->buffer_info[i].bytecount = bytecount;
2975	tx_ring->buffer_info[first].next_to_watch = i;
2976
2977	return count;
2978
2979dma_error:
2980	dev_err(&pdev->dev, "TX DMA map failed\n");
2981	buffer_info->dma = 0;
2982	if (count)
2983		count--;
2984
2985	while (count--) {
2986		if (i == 0)
2987			i += tx_ring->count;
2988		i--;
2989		buffer_info = &tx_ring->buffer_info[i];
2990		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2991	}
2992
2993	return 0;
2994}
2995
2996static void e1000_tx_queue(struct e1000_adapter *adapter,
2997			   struct e1000_tx_ring *tx_ring, int tx_flags,
2998			   int count)
2999{
3000	struct e1000_tx_desc *tx_desc = NULL;
3001	struct e1000_tx_buffer *buffer_info;
3002	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3003	unsigned int i;
3004
3005	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3006		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3007			     E1000_TXD_CMD_TSE;
3008		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3009
3010		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3011			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3012	}
3013
3014	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3015		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3016		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3017	}
3018
3019	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3020		txd_lower |= E1000_TXD_CMD_VLE;
3021		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3022	}
3023
3024	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3025		txd_lower &= ~(E1000_TXD_CMD_IFCS);
3026
3027	i = tx_ring->next_to_use;
3028
3029	while (count--) {
3030		buffer_info = &tx_ring->buffer_info[i];
3031		tx_desc = E1000_TX_DESC(*tx_ring, i);
3032		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3033		tx_desc->lower.data =
3034			cpu_to_le32(txd_lower | buffer_info->length);
3035		tx_desc->upper.data = cpu_to_le32(txd_upper);
3036		if (unlikely(++i == tx_ring->count))
3037			i = 0;
3038	}
3039
3040	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3041
3042	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3043	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3044		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3045
3046	/* Force memory writes to complete before letting h/w
3047	 * know there are new descriptors to fetch.  (Only
3048	 * applicable for weak-ordered memory model archs,
3049	 * such as IA-64).
3050	 */
3051	wmb();
3052
3053	tx_ring->next_to_use = i;
3054}
3055
3056/* 82547 workaround to avoid controller hang in half-duplex environment.
3057 * The workaround is to avoid queuing a large packet that would span
3058 * the internal Tx FIFO ring boundary by notifying the stack to resend
3059 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3060 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3061 * to the beginning of the Tx FIFO.
3062 */
3063
3064#define E1000_FIFO_HDR			0x10
3065#define E1000_82547_PAD_LEN		0x3E0
3066
3067static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3068				       struct sk_buff *skb)
3069{
3070	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3071	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3072
3073	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3074
3075	if (adapter->link_duplex != HALF_DUPLEX)
3076		goto no_fifo_stall_required;
3077
3078	if (atomic_read(&adapter->tx_fifo_stall))
3079		return 1;
3080
3081	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3082		atomic_set(&adapter->tx_fifo_stall, 1);
3083		return 1;
3084	}
3085
3086no_fifo_stall_required:
3087	adapter->tx_fifo_head += skb_fifo_len;
3088	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3089		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3090	return 0;
3091}
3092
3093static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3094{
3095	struct e1000_adapter *adapter = netdev_priv(netdev);
3096	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3097
3098	netif_stop_queue(netdev);
3099	/* Herbert's original patch had:
3100	 *  smp_mb__after_netif_stop_queue();
3101	 * but since that doesn't exist yet, just open code it.
3102	 */
3103	smp_mb();
3104
3105	/* We need to check again in a case another CPU has just
3106	 * made room available.
3107	 */
3108	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3109		return -EBUSY;
3110
3111	/* A reprieve! */
3112	netif_start_queue(netdev);
3113	++adapter->restart_queue;
3114	return 0;
3115}
3116
3117static int e1000_maybe_stop_tx(struct net_device *netdev,
3118			       struct e1000_tx_ring *tx_ring, int size)
3119{
3120	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3121		return 0;
3122	return __e1000_maybe_stop_tx(netdev, size);
3123}
3124
3125#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3126static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3127				    struct net_device *netdev)
3128{
3129	struct e1000_adapter *adapter = netdev_priv(netdev);
3130	struct e1000_hw *hw = &adapter->hw;
3131	struct e1000_tx_ring *tx_ring;
3132	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3133	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3134	unsigned int tx_flags = 0;
3135	unsigned int len = skb_headlen(skb);
3136	unsigned int nr_frags;
3137	unsigned int mss;
3138	int count = 0;
3139	int tso;
3140	unsigned int f;
3141	__be16 protocol = vlan_get_protocol(skb);
3142
3143	/* This goes back to the question of how to logically map a Tx queue
3144	 * to a flow.  Right now, performance is impacted slightly negatively
3145	 * if using multiple Tx queues.  If the stack breaks away from a
3146	 * single qdisc implementation, we can look at this again.
3147	 */
3148	tx_ring = adapter->tx_ring;
3149
3150	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3151	 * packets may get corrupted during padding by HW.
3152	 * To WA this issue, pad all small packets manually.
3153	 */
3154	if (eth_skb_pad(skb))
3155		return NETDEV_TX_OK;
3156
3157	mss = skb_shinfo(skb)->gso_size;
3158	/* The controller does a simple calculation to
3159	 * make sure there is enough room in the FIFO before
3160	 * initiating the DMA for each buffer.  The calc is:
3161	 * 4 = ceil(buffer len/mss).  To make sure we don't
3162	 * overrun the FIFO, adjust the max buffer len if mss
3163	 * drops.
3164	 */
3165	if (mss) {
3166		u8 hdr_len;
3167		max_per_txd = min(mss << 2, max_per_txd);
3168		max_txd_pwr = fls(max_per_txd) - 1;
3169
3170		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3171		if (skb->data_len && hdr_len == len) {
3172			switch (hw->mac_type) {
 
3173				unsigned int pull_size;
3174			case e1000_82544:
3175				/* Make sure we have room to chop off 4 bytes,
3176				 * and that the end alignment will work out to
3177				 * this hardware's requirements
3178				 * NOTE: this is a TSO only workaround
3179				 * if end byte alignment not correct move us
3180				 * into the next dword
3181				 */
3182				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3183				    & 4)
3184					break;
3185				/* fall through */
3186				pull_size = min((unsigned int)4, skb->data_len);
3187				if (!__pskb_pull_tail(skb, pull_size)) {
3188					e_err(drv, "__pskb_pull_tail "
3189					      "failed.\n");
3190					dev_kfree_skb_any(skb);
3191					return NETDEV_TX_OK;
3192				}
3193				len = skb_headlen(skb);
3194				break;
 
3195			default:
3196				/* do nothing */
3197				break;
3198			}
3199		}
3200	}
3201
3202	/* reserve a descriptor for the offload context */
3203	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3204		count++;
3205	count++;
3206
3207	/* Controller Erratum workaround */
3208	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3209		count++;
3210
3211	count += TXD_USE_COUNT(len, max_txd_pwr);
3212
3213	if (adapter->pcix_82544)
3214		count++;
3215
3216	/* work-around for errata 10 and it applies to all controllers
3217	 * in PCI-X mode, so add one more descriptor to the count
3218	 */
3219	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3220			(len > 2015)))
3221		count++;
3222
3223	nr_frags = skb_shinfo(skb)->nr_frags;
3224	for (f = 0; f < nr_frags; f++)
3225		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3226				       max_txd_pwr);
3227	if (adapter->pcix_82544)
3228		count += nr_frags;
3229
3230	/* need: count + 2 desc gap to keep tail from touching
3231	 * head, otherwise try next time
3232	 */
3233	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3234		return NETDEV_TX_BUSY;
3235
3236	if (unlikely((hw->mac_type == e1000_82547) &&
3237		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3238		netif_stop_queue(netdev);
3239		if (!test_bit(__E1000_DOWN, &adapter->flags))
3240			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3241		return NETDEV_TX_BUSY;
3242	}
3243
3244	if (skb_vlan_tag_present(skb)) {
3245		tx_flags |= E1000_TX_FLAGS_VLAN;
3246		tx_flags |= (skb_vlan_tag_get(skb) <<
3247			     E1000_TX_FLAGS_VLAN_SHIFT);
3248	}
3249
3250	first = tx_ring->next_to_use;
3251
3252	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3253	if (tso < 0) {
3254		dev_kfree_skb_any(skb);
3255		return NETDEV_TX_OK;
3256	}
3257
3258	if (likely(tso)) {
3259		if (likely(hw->mac_type != e1000_82544))
3260			tx_ring->last_tx_tso = true;
3261		tx_flags |= E1000_TX_FLAGS_TSO;
3262	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3263		tx_flags |= E1000_TX_FLAGS_CSUM;
3264
3265	if (protocol == htons(ETH_P_IP))
3266		tx_flags |= E1000_TX_FLAGS_IPV4;
3267
3268	if (unlikely(skb->no_fcs))
3269		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3270
3271	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3272			     nr_frags, mss);
3273
3274	if (count) {
3275		/* The descriptors needed is higher than other Intel drivers
3276		 * due to a number of workarounds.  The breakdown is below:
3277		 * Data descriptors: MAX_SKB_FRAGS + 1
3278		 * Context Descriptor: 1
3279		 * Keep head from touching tail: 2
3280		 * Workarounds: 3
3281		 */
3282		int desc_needed = MAX_SKB_FRAGS + 7;
3283
3284		netdev_sent_queue(netdev, skb->len);
3285		skb_tx_timestamp(skb);
3286
3287		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3288
3289		/* 82544 potentially requires twice as many data descriptors
3290		 * in order to guarantee buffers don't end on evenly-aligned
3291		 * dwords
3292		 */
3293		if (adapter->pcix_82544)
3294			desc_needed += MAX_SKB_FRAGS + 1;
3295
3296		/* Make sure there is space in the ring for the next send. */
3297		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3298
3299		if (!skb->xmit_more ||
3300		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3301			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3302			/* we need this if more than one processor can write to
3303			 * our tail at a time, it synchronizes IO on IA64/Altix
3304			 * systems
3305			 */
3306			mmiowb();
3307		}
3308	} else {
3309		dev_kfree_skb_any(skb);
3310		tx_ring->buffer_info[first].time_stamp = 0;
3311		tx_ring->next_to_use = first;
3312	}
3313
3314	return NETDEV_TX_OK;
3315}
3316
3317#define NUM_REGS 38 /* 1 based count */
3318static void e1000_regdump(struct e1000_adapter *adapter)
3319{
3320	struct e1000_hw *hw = &adapter->hw;
3321	u32 regs[NUM_REGS];
3322	u32 *regs_buff = regs;
3323	int i = 0;
3324
3325	static const char * const reg_name[] = {
3326		"CTRL",  "STATUS",
3327		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3328		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3329		"TIDV", "TXDCTL", "TADV", "TARC0",
3330		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3331		"TXDCTL1", "TARC1",
3332		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3333		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3334		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3335	};
3336
3337	regs_buff[0]  = er32(CTRL);
3338	regs_buff[1]  = er32(STATUS);
3339
3340	regs_buff[2]  = er32(RCTL);
3341	regs_buff[3]  = er32(RDLEN);
3342	regs_buff[4]  = er32(RDH);
3343	regs_buff[5]  = er32(RDT);
3344	regs_buff[6]  = er32(RDTR);
3345
3346	regs_buff[7]  = er32(TCTL);
3347	regs_buff[8]  = er32(TDBAL);
3348	regs_buff[9]  = er32(TDBAH);
3349	regs_buff[10] = er32(TDLEN);
3350	regs_buff[11] = er32(TDH);
3351	regs_buff[12] = er32(TDT);
3352	regs_buff[13] = er32(TIDV);
3353	regs_buff[14] = er32(TXDCTL);
3354	regs_buff[15] = er32(TADV);
3355	regs_buff[16] = er32(TARC0);
3356
3357	regs_buff[17] = er32(TDBAL1);
3358	regs_buff[18] = er32(TDBAH1);
3359	regs_buff[19] = er32(TDLEN1);
3360	regs_buff[20] = er32(TDH1);
3361	regs_buff[21] = er32(TDT1);
3362	regs_buff[22] = er32(TXDCTL1);
3363	regs_buff[23] = er32(TARC1);
3364	regs_buff[24] = er32(CTRL_EXT);
3365	regs_buff[25] = er32(ERT);
3366	regs_buff[26] = er32(RDBAL0);
3367	regs_buff[27] = er32(RDBAH0);
3368	regs_buff[28] = er32(TDFH);
3369	regs_buff[29] = er32(TDFT);
3370	regs_buff[30] = er32(TDFHS);
3371	regs_buff[31] = er32(TDFTS);
3372	regs_buff[32] = er32(TDFPC);
3373	regs_buff[33] = er32(RDFH);
3374	regs_buff[34] = er32(RDFT);
3375	regs_buff[35] = er32(RDFHS);
3376	regs_buff[36] = er32(RDFTS);
3377	regs_buff[37] = er32(RDFPC);
3378
3379	pr_info("Register dump\n");
3380	for (i = 0; i < NUM_REGS; i++)
3381		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3382}
3383
3384/*
3385 * e1000_dump: Print registers, tx ring and rx ring
3386 */
3387static void e1000_dump(struct e1000_adapter *adapter)
3388{
3389	/* this code doesn't handle multiple rings */
3390	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3391	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3392	int i;
3393
3394	if (!netif_msg_hw(adapter))
3395		return;
3396
3397	/* Print Registers */
3398	e1000_regdump(adapter);
3399
3400	/* transmit dump */
3401	pr_info("TX Desc ring0 dump\n");
3402
3403	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3404	 *
3405	 * Legacy Transmit Descriptor
3406	 *   +--------------------------------------------------------------+
3407	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3408	 *   +--------------------------------------------------------------+
3409	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3410	 *   +--------------------------------------------------------------+
3411	 *   63       48 47        36 35    32 31     24 23    16 15        0
3412	 *
3413	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3414	 *   63      48 47    40 39       32 31             16 15    8 7      0
3415	 *   +----------------------------------------------------------------+
3416	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3417	 *   +----------------------------------------------------------------+
3418	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3419	 *   +----------------------------------------------------------------+
3420	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3421	 *
3422	 * Extended Data Descriptor (DTYP=0x1)
3423	 *   +----------------------------------------------------------------+
3424	 * 0 |                     Buffer Address [63:0]                      |
3425	 *   +----------------------------------------------------------------+
3426	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3427	 *   +----------------------------------------------------------------+
3428	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3429	 */
3430	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3431	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3432
3433	if (!netif_msg_tx_done(adapter))
3434		goto rx_ring_summary;
3435
3436	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3437		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3438		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3439		struct my_u { __le64 a; __le64 b; };
3440		struct my_u *u = (struct my_u *)tx_desc;
3441		const char *type;
3442
3443		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3444			type = "NTC/U";
3445		else if (i == tx_ring->next_to_use)
3446			type = "NTU";
3447		else if (i == tx_ring->next_to_clean)
3448			type = "NTC";
3449		else
3450			type = "";
3451
3452		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3453			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3454			le64_to_cpu(u->a), le64_to_cpu(u->b),
3455			(u64)buffer_info->dma, buffer_info->length,
3456			buffer_info->next_to_watch,
3457			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3458	}
3459
3460rx_ring_summary:
3461	/* receive dump */
3462	pr_info("\nRX Desc ring dump\n");
3463
3464	/* Legacy Receive Descriptor Format
3465	 *
3466	 * +-----------------------------------------------------+
3467	 * |                Buffer Address [63:0]                |
3468	 * +-----------------------------------------------------+
3469	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3470	 * +-----------------------------------------------------+
3471	 * 63       48 47    40 39      32 31         16 15      0
3472	 */
3473	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3474
3475	if (!netif_msg_rx_status(adapter))
3476		goto exit;
3477
3478	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3479		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3480		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3481		struct my_u { __le64 a; __le64 b; };
3482		struct my_u *u = (struct my_u *)rx_desc;
3483		const char *type;
3484
3485		if (i == rx_ring->next_to_use)
3486			type = "NTU";
3487		else if (i == rx_ring->next_to_clean)
3488			type = "NTC";
3489		else
3490			type = "";
3491
3492		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3493			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3494			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3495	} /* for */
3496
3497	/* dump the descriptor caches */
3498	/* rx */
3499	pr_info("Rx descriptor cache in 64bit format\n");
3500	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3501		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3502			i,
3503			readl(adapter->hw.hw_addr + i+4),
3504			readl(adapter->hw.hw_addr + i),
3505			readl(adapter->hw.hw_addr + i+12),
3506			readl(adapter->hw.hw_addr + i+8));
3507	}
3508	/* tx */
3509	pr_info("Tx descriptor cache in 64bit format\n");
3510	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3511		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3512			i,
3513			readl(adapter->hw.hw_addr + i+4),
3514			readl(adapter->hw.hw_addr + i),
3515			readl(adapter->hw.hw_addr + i+12),
3516			readl(adapter->hw.hw_addr + i+8));
3517	}
3518exit:
3519	return;
3520}
3521
3522/**
3523 * e1000_tx_timeout - Respond to a Tx Hang
3524 * @netdev: network interface device structure
 
3525 **/
3526static void e1000_tx_timeout(struct net_device *netdev)
3527{
3528	struct e1000_adapter *adapter = netdev_priv(netdev);
3529
3530	/* Do the reset outside of interrupt context */
3531	adapter->tx_timeout_count++;
3532	schedule_work(&adapter->reset_task);
3533}
3534
3535static void e1000_reset_task(struct work_struct *work)
3536{
3537	struct e1000_adapter *adapter =
3538		container_of(work, struct e1000_adapter, reset_task);
3539
3540	e_err(drv, "Reset adapter\n");
 
3541	e1000_reinit_locked(adapter);
 
3542}
3543
3544/**
3545 * e1000_change_mtu - Change the Maximum Transfer Unit
3546 * @netdev: network interface device structure
3547 * @new_mtu: new value for maximum frame size
3548 *
3549 * Returns 0 on success, negative on failure
3550 **/
3551static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3552{
3553	struct e1000_adapter *adapter = netdev_priv(netdev);
3554	struct e1000_hw *hw = &adapter->hw;
3555	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3556
3557	/* Adapter-specific max frame size limits. */
3558	switch (hw->mac_type) {
3559	case e1000_undefined ... e1000_82542_rev2_1:
3560		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3561			e_err(probe, "Jumbo Frames not supported.\n");
3562			return -EINVAL;
3563		}
3564		break;
3565	default:
3566		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3567		break;
3568	}
3569
3570	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3571		msleep(1);
3572	/* e1000_down has a dependency on max_frame_size */
3573	hw->max_frame_size = max_frame;
3574	if (netif_running(netdev)) {
3575		/* prevent buffers from being reallocated */
3576		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3577		e1000_down(adapter);
3578	}
3579
3580	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3581	 * means we reserve 2 more, this pushes us to allocate from the next
3582	 * larger slab size.
3583	 * i.e. RXBUFFER_2048 --> size-4096 slab
3584	 * however with the new *_jumbo_rx* routines, jumbo receives will use
3585	 * fragmented skbs
3586	 */
3587
3588	if (max_frame <= E1000_RXBUFFER_2048)
3589		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3590	else
3591#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3592		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3593#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3594		adapter->rx_buffer_len = PAGE_SIZE;
3595#endif
3596
3597	/* adjust allocation if LPE protects us, and we aren't using SBP */
3598	if (!hw->tbi_compatibility_on &&
3599	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3600	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3601		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3602
3603	pr_info("%s changing MTU from %d to %d\n",
3604		netdev->name, netdev->mtu, new_mtu);
3605	netdev->mtu = new_mtu;
3606
3607	if (netif_running(netdev))
3608		e1000_up(adapter);
3609	else
3610		e1000_reset(adapter);
3611
3612	clear_bit(__E1000_RESETTING, &adapter->flags);
3613
3614	return 0;
3615}
3616
3617/**
3618 * e1000_update_stats - Update the board statistics counters
3619 * @adapter: board private structure
3620 **/
3621void e1000_update_stats(struct e1000_adapter *adapter)
3622{
3623	struct net_device *netdev = adapter->netdev;
3624	struct e1000_hw *hw = &adapter->hw;
3625	struct pci_dev *pdev = adapter->pdev;
3626	unsigned long flags;
3627	u16 phy_tmp;
3628
3629#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3630
3631	/* Prevent stats update while adapter is being reset, or if the pci
3632	 * connection is down.
3633	 */
3634	if (adapter->link_speed == 0)
3635		return;
3636	if (pci_channel_offline(pdev))
3637		return;
3638
3639	spin_lock_irqsave(&adapter->stats_lock, flags);
3640
3641	/* these counters are modified from e1000_tbi_adjust_stats,
3642	 * called from the interrupt context, so they must only
3643	 * be written while holding adapter->stats_lock
3644	 */
3645
3646	adapter->stats.crcerrs += er32(CRCERRS);
3647	adapter->stats.gprc += er32(GPRC);
3648	adapter->stats.gorcl += er32(GORCL);
3649	adapter->stats.gorch += er32(GORCH);
3650	adapter->stats.bprc += er32(BPRC);
3651	adapter->stats.mprc += er32(MPRC);
3652	adapter->stats.roc += er32(ROC);
3653
3654	adapter->stats.prc64 += er32(PRC64);
3655	adapter->stats.prc127 += er32(PRC127);
3656	adapter->stats.prc255 += er32(PRC255);
3657	adapter->stats.prc511 += er32(PRC511);
3658	adapter->stats.prc1023 += er32(PRC1023);
3659	adapter->stats.prc1522 += er32(PRC1522);
3660
3661	adapter->stats.symerrs += er32(SYMERRS);
3662	adapter->stats.mpc += er32(MPC);
3663	adapter->stats.scc += er32(SCC);
3664	adapter->stats.ecol += er32(ECOL);
3665	adapter->stats.mcc += er32(MCC);
3666	adapter->stats.latecol += er32(LATECOL);
3667	adapter->stats.dc += er32(DC);
3668	adapter->stats.sec += er32(SEC);
3669	adapter->stats.rlec += er32(RLEC);
3670	adapter->stats.xonrxc += er32(XONRXC);
3671	adapter->stats.xontxc += er32(XONTXC);
3672	adapter->stats.xoffrxc += er32(XOFFRXC);
3673	adapter->stats.xofftxc += er32(XOFFTXC);
3674	adapter->stats.fcruc += er32(FCRUC);
3675	adapter->stats.gptc += er32(GPTC);
3676	adapter->stats.gotcl += er32(GOTCL);
3677	adapter->stats.gotch += er32(GOTCH);
3678	adapter->stats.rnbc += er32(RNBC);
3679	adapter->stats.ruc += er32(RUC);
3680	adapter->stats.rfc += er32(RFC);
3681	adapter->stats.rjc += er32(RJC);
3682	adapter->stats.torl += er32(TORL);
3683	adapter->stats.torh += er32(TORH);
3684	adapter->stats.totl += er32(TOTL);
3685	adapter->stats.toth += er32(TOTH);
3686	adapter->stats.tpr += er32(TPR);
3687
3688	adapter->stats.ptc64 += er32(PTC64);
3689	adapter->stats.ptc127 += er32(PTC127);
3690	adapter->stats.ptc255 += er32(PTC255);
3691	adapter->stats.ptc511 += er32(PTC511);
3692	adapter->stats.ptc1023 += er32(PTC1023);
3693	adapter->stats.ptc1522 += er32(PTC1522);
3694
3695	adapter->stats.mptc += er32(MPTC);
3696	adapter->stats.bptc += er32(BPTC);
3697
3698	/* used for adaptive IFS */
3699
3700	hw->tx_packet_delta = er32(TPT);
3701	adapter->stats.tpt += hw->tx_packet_delta;
3702	hw->collision_delta = er32(COLC);
3703	adapter->stats.colc += hw->collision_delta;
3704
3705	if (hw->mac_type >= e1000_82543) {
3706		adapter->stats.algnerrc += er32(ALGNERRC);
3707		adapter->stats.rxerrc += er32(RXERRC);
3708		adapter->stats.tncrs += er32(TNCRS);
3709		adapter->stats.cexterr += er32(CEXTERR);
3710		adapter->stats.tsctc += er32(TSCTC);
3711		adapter->stats.tsctfc += er32(TSCTFC);
3712	}
3713
3714	/* Fill out the OS statistics structure */
3715	netdev->stats.multicast = adapter->stats.mprc;
3716	netdev->stats.collisions = adapter->stats.colc;
3717
3718	/* Rx Errors */
3719
3720	/* RLEC on some newer hardware can be incorrect so build
3721	 * our own version based on RUC and ROC
3722	 */
3723	netdev->stats.rx_errors = adapter->stats.rxerrc +
3724		adapter->stats.crcerrs + adapter->stats.algnerrc +
3725		adapter->stats.ruc + adapter->stats.roc +
3726		adapter->stats.cexterr;
3727	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3728	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3729	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3730	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3731	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3732
3733	/* Tx Errors */
3734	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3735	netdev->stats.tx_errors = adapter->stats.txerrc;
3736	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3737	netdev->stats.tx_window_errors = adapter->stats.latecol;
3738	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3739	if (hw->bad_tx_carr_stats_fd &&
3740	    adapter->link_duplex == FULL_DUPLEX) {
3741		netdev->stats.tx_carrier_errors = 0;
3742		adapter->stats.tncrs = 0;
3743	}
3744
3745	/* Tx Dropped needs to be maintained elsewhere */
3746
3747	/* Phy Stats */
3748	if (hw->media_type == e1000_media_type_copper) {
3749		if ((adapter->link_speed == SPEED_1000) &&
3750		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3751			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3752			adapter->phy_stats.idle_errors += phy_tmp;
3753		}
3754
3755		if ((hw->mac_type <= e1000_82546) &&
3756		   (hw->phy_type == e1000_phy_m88) &&
3757		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3758			adapter->phy_stats.receive_errors += phy_tmp;
3759	}
3760
3761	/* Management Stats */
3762	if (hw->has_smbus) {
3763		adapter->stats.mgptc += er32(MGTPTC);
3764		adapter->stats.mgprc += er32(MGTPRC);
3765		adapter->stats.mgpdc += er32(MGTPDC);
3766	}
3767
3768	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3769}
3770
3771/**
3772 * e1000_intr - Interrupt Handler
3773 * @irq: interrupt number
3774 * @data: pointer to a network interface device structure
3775 **/
3776static irqreturn_t e1000_intr(int irq, void *data)
3777{
3778	struct net_device *netdev = data;
3779	struct e1000_adapter *adapter = netdev_priv(netdev);
3780	struct e1000_hw *hw = &adapter->hw;
3781	u32 icr = er32(ICR);
3782
3783	if (unlikely((!icr)))
3784		return IRQ_NONE;  /* Not our interrupt */
3785
3786	/* we might have caused the interrupt, but the above
3787	 * read cleared it, and just in case the driver is
3788	 * down there is nothing to do so return handled
3789	 */
3790	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3791		return IRQ_HANDLED;
3792
3793	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3794		hw->get_link_status = 1;
3795		/* guard against interrupt when we're going down */
3796		if (!test_bit(__E1000_DOWN, &adapter->flags))
3797			schedule_delayed_work(&adapter->watchdog_task, 1);
3798	}
3799
3800	/* disable interrupts, without the synchronize_irq bit */
3801	ew32(IMC, ~0);
3802	E1000_WRITE_FLUSH();
3803
3804	if (likely(napi_schedule_prep(&adapter->napi))) {
3805		adapter->total_tx_bytes = 0;
3806		adapter->total_tx_packets = 0;
3807		adapter->total_rx_bytes = 0;
3808		adapter->total_rx_packets = 0;
3809		__napi_schedule(&adapter->napi);
3810	} else {
3811		/* this really should not happen! if it does it is basically a
3812		 * bug, but not a hard error, so enable ints and continue
3813		 */
3814		if (!test_bit(__E1000_DOWN, &adapter->flags))
3815			e1000_irq_enable(adapter);
3816	}
3817
3818	return IRQ_HANDLED;
3819}
3820
3821/**
3822 * e1000_clean - NAPI Rx polling callback
3823 * @adapter: board private structure
 
3824 **/
3825static int e1000_clean(struct napi_struct *napi, int budget)
3826{
3827	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3828						     napi);
3829	int tx_clean_complete = 0, work_done = 0;
3830
3831	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3832
3833	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3834
3835	if (!tx_clean_complete)
3836		work_done = budget;
3837
3838	/* If budget not fully consumed, exit the polling mode */
3839	if (work_done < budget) {
 
 
3840		if (likely(adapter->itr_setting & 3))
3841			e1000_set_itr(adapter);
3842		napi_complete_done(napi, work_done);
3843		if (!test_bit(__E1000_DOWN, &adapter->flags))
3844			e1000_irq_enable(adapter);
3845	}
3846
3847	return work_done;
3848}
3849
3850/**
3851 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3852 * @adapter: board private structure
 
3853 **/
3854static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3855			       struct e1000_tx_ring *tx_ring)
3856{
3857	struct e1000_hw *hw = &adapter->hw;
3858	struct net_device *netdev = adapter->netdev;
3859	struct e1000_tx_desc *tx_desc, *eop_desc;
3860	struct e1000_tx_buffer *buffer_info;
3861	unsigned int i, eop;
3862	unsigned int count = 0;
3863	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3864	unsigned int bytes_compl = 0, pkts_compl = 0;
3865
3866	i = tx_ring->next_to_clean;
3867	eop = tx_ring->buffer_info[i].next_to_watch;
3868	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3869
3870	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3871	       (count < tx_ring->count)) {
3872		bool cleaned = false;
3873		dma_rmb();	/* read buffer_info after eop_desc */
3874		for ( ; !cleaned; count++) {
3875			tx_desc = E1000_TX_DESC(*tx_ring, i);
3876			buffer_info = &tx_ring->buffer_info[i];
3877			cleaned = (i == eop);
3878
3879			if (cleaned) {
3880				total_tx_packets += buffer_info->segs;
3881				total_tx_bytes += buffer_info->bytecount;
3882				if (buffer_info->skb) {
3883					bytes_compl += buffer_info->skb->len;
3884					pkts_compl++;
3885				}
3886
3887			}
3888			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 
3889			tx_desc->upper.data = 0;
3890
3891			if (unlikely(++i == tx_ring->count))
3892				i = 0;
3893		}
3894
3895		eop = tx_ring->buffer_info[i].next_to_watch;
3896		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3897	}
3898
3899	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3900	 * which will reuse the cleaned buffers.
3901	 */
3902	smp_store_release(&tx_ring->next_to_clean, i);
3903
3904	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3905
3906#define TX_WAKE_THRESHOLD 32
3907	if (unlikely(count && netif_carrier_ok(netdev) &&
3908		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3909		/* Make sure that anybody stopping the queue after this
3910		 * sees the new next_to_clean.
3911		 */
3912		smp_mb();
3913
3914		if (netif_queue_stopped(netdev) &&
3915		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3916			netif_wake_queue(netdev);
3917			++adapter->restart_queue;
3918		}
3919	}
3920
3921	if (adapter->detect_tx_hung) {
3922		/* Detect a transmit hang in hardware, this serializes the
3923		 * check with the clearing of time_stamp and movement of i
3924		 */
3925		adapter->detect_tx_hung = false;
3926		if (tx_ring->buffer_info[eop].time_stamp &&
3927		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3928			       (adapter->tx_timeout_factor * HZ)) &&
3929		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3930
3931			/* detected Tx unit hang */
3932			e_err(drv, "Detected Tx Unit Hang\n"
3933			      "  Tx Queue             <%lu>\n"
3934			      "  TDH                  <%x>\n"
3935			      "  TDT                  <%x>\n"
3936			      "  next_to_use          <%x>\n"
3937			      "  next_to_clean        <%x>\n"
3938			      "buffer_info[next_to_clean]\n"
3939			      "  time_stamp           <%lx>\n"
3940			      "  next_to_watch        <%x>\n"
3941			      "  jiffies              <%lx>\n"
3942			      "  next_to_watch.status <%x>\n",
3943				(unsigned long)(tx_ring - adapter->tx_ring),
3944				readl(hw->hw_addr + tx_ring->tdh),
3945				readl(hw->hw_addr + tx_ring->tdt),
3946				tx_ring->next_to_use,
3947				tx_ring->next_to_clean,
3948				tx_ring->buffer_info[eop].time_stamp,
3949				eop,
3950				jiffies,
3951				eop_desc->upper.fields.status);
3952			e1000_dump(adapter);
3953			netif_stop_queue(netdev);
3954		}
3955	}
3956	adapter->total_tx_bytes += total_tx_bytes;
3957	adapter->total_tx_packets += total_tx_packets;
3958	netdev->stats.tx_bytes += total_tx_bytes;
3959	netdev->stats.tx_packets += total_tx_packets;
3960	return count < tx_ring->count;
3961}
3962
3963/**
3964 * e1000_rx_checksum - Receive Checksum Offload for 82543
3965 * @adapter:     board private structure
3966 * @status_err:  receive descriptor status and error fields
3967 * @csum:        receive descriptor csum field
3968 * @sk_buff:     socket buffer with received data
3969 **/
3970static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3971			      u32 csum, struct sk_buff *skb)
3972{
3973	struct e1000_hw *hw = &adapter->hw;
3974	u16 status = (u16)status_err;
3975	u8 errors = (u8)(status_err >> 24);
3976
3977	skb_checksum_none_assert(skb);
3978
3979	/* 82543 or newer only */
3980	if (unlikely(hw->mac_type < e1000_82543))
3981		return;
3982	/* Ignore Checksum bit is set */
3983	if (unlikely(status & E1000_RXD_STAT_IXSM))
3984		return;
3985	/* TCP/UDP checksum error bit is set */
3986	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3987		/* let the stack verify checksum errors */
3988		adapter->hw_csum_err++;
3989		return;
3990	}
3991	/* TCP/UDP Checksum has not been calculated */
3992	if (!(status & E1000_RXD_STAT_TCPCS))
3993		return;
3994
3995	/* It must be a TCP or UDP packet with a valid checksum */
3996	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3997		/* TCP checksum is good */
3998		skb->ip_summed = CHECKSUM_UNNECESSARY;
3999	}
4000	adapter->hw_csum_good++;
4001}
4002
4003/**
4004 * e1000_consume_page - helper function for jumbo Rx path
 
 
 
4005 **/
4006static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4007			       u16 length)
4008{
4009	bi->rxbuf.page = NULL;
4010	skb->len += length;
4011	skb->data_len += length;
4012	skb->truesize += PAGE_SIZE;
4013}
4014
4015/**
4016 * e1000_receive_skb - helper function to handle rx indications
4017 * @adapter: board private structure
4018 * @status: descriptor status field as written by hardware
4019 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4020 * @skb: pointer to sk_buff to be indicated to stack
4021 */
4022static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4023			      __le16 vlan, struct sk_buff *skb)
4024{
4025	skb->protocol = eth_type_trans(skb, adapter->netdev);
4026
4027	if (status & E1000_RXD_STAT_VP) {
4028		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4029
4030		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4031	}
4032	napi_gro_receive(&adapter->napi, skb);
4033}
4034
4035/**
4036 * e1000_tbi_adjust_stats
4037 * @hw: Struct containing variables accessed by shared code
 
4038 * @frame_len: The length of the frame in question
4039 * @mac_addr: The Ethernet destination address of the frame in question
4040 *
4041 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4042 */
4043static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4044				   struct e1000_hw_stats *stats,
4045				   u32 frame_len, const u8 *mac_addr)
4046{
4047	u64 carry_bit;
4048
4049	/* First adjust the frame length. */
4050	frame_len--;
4051	/* We need to adjust the statistics counters, since the hardware
4052	 * counters overcount this packet as a CRC error and undercount
4053	 * the packet as a good packet
4054	 */
4055	/* This packet should not be counted as a CRC error. */
4056	stats->crcerrs--;
4057	/* This packet does count as a Good Packet Received. */
4058	stats->gprc++;
4059
4060	/* Adjust the Good Octets received counters */
4061	carry_bit = 0x80000000 & stats->gorcl;
4062	stats->gorcl += frame_len;
4063	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4064	 * Received Count) was one before the addition,
4065	 * AND it is zero after, then we lost the carry out,
4066	 * need to add one to Gorch (Good Octets Received Count High).
4067	 * This could be simplified if all environments supported
4068	 * 64-bit integers.
4069	 */
4070	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4071		stats->gorch++;
4072	/* Is this a broadcast or multicast?  Check broadcast first,
4073	 * since the test for a multicast frame will test positive on
4074	 * a broadcast frame.
4075	 */
4076	if (is_broadcast_ether_addr(mac_addr))
4077		stats->bprc++;
4078	else if (is_multicast_ether_addr(mac_addr))
4079		stats->mprc++;
4080
4081	if (frame_len == hw->max_frame_size) {
4082		/* In this case, the hardware has overcounted the number of
4083		 * oversize frames.
4084		 */
4085		if (stats->roc > 0)
4086			stats->roc--;
4087	}
4088
4089	/* Adjust the bin counters when the extra byte put the frame in the
4090	 * wrong bin. Remember that the frame_len was adjusted above.
4091	 */
4092	if (frame_len == 64) {
4093		stats->prc64++;
4094		stats->prc127--;
4095	} else if (frame_len == 127) {
4096		stats->prc127++;
4097		stats->prc255--;
4098	} else if (frame_len == 255) {
4099		stats->prc255++;
4100		stats->prc511--;
4101	} else if (frame_len == 511) {
4102		stats->prc511++;
4103		stats->prc1023--;
4104	} else if (frame_len == 1023) {
4105		stats->prc1023++;
4106		stats->prc1522--;
4107	} else if (frame_len == 1522) {
4108		stats->prc1522++;
4109	}
4110}
4111
4112static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4113				    u8 status, u8 errors,
4114				    u32 length, const u8 *data)
4115{
4116	struct e1000_hw *hw = &adapter->hw;
4117	u8 last_byte = *(data + length - 1);
4118
4119	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4120		unsigned long irq_flags;
4121
4122		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4123		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4124		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4125
4126		return true;
4127	}
4128
4129	return false;
4130}
4131
4132static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4133					  unsigned int bufsz)
4134{
4135	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4136
4137	if (unlikely(!skb))
4138		adapter->alloc_rx_buff_failed++;
4139	return skb;
4140}
4141
4142/**
4143 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4144 * @adapter: board private structure
4145 * @rx_ring: ring to clean
4146 * @work_done: amount of napi work completed this call
4147 * @work_to_do: max amount of work allowed for this call to do
4148 *
4149 * the return value indicates whether actual cleaning was done, there
4150 * is no guarantee that everything was cleaned
4151 */
4152static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4153				     struct e1000_rx_ring *rx_ring,
4154				     int *work_done, int work_to_do)
4155{
4156	struct net_device *netdev = adapter->netdev;
4157	struct pci_dev *pdev = adapter->pdev;
4158	struct e1000_rx_desc *rx_desc, *next_rxd;
4159	struct e1000_rx_buffer *buffer_info, *next_buffer;
4160	u32 length;
4161	unsigned int i;
4162	int cleaned_count = 0;
4163	bool cleaned = false;
4164	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4165
4166	i = rx_ring->next_to_clean;
4167	rx_desc = E1000_RX_DESC(*rx_ring, i);
4168	buffer_info = &rx_ring->buffer_info[i];
4169
4170	while (rx_desc->status & E1000_RXD_STAT_DD) {
4171		struct sk_buff *skb;
4172		u8 status;
4173
4174		if (*work_done >= work_to_do)
4175			break;
4176		(*work_done)++;
4177		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4178
4179		status = rx_desc->status;
4180
4181		if (++i == rx_ring->count)
4182			i = 0;
4183
4184		next_rxd = E1000_RX_DESC(*rx_ring, i);
4185		prefetch(next_rxd);
4186
4187		next_buffer = &rx_ring->buffer_info[i];
4188
4189		cleaned = true;
4190		cleaned_count++;
4191		dma_unmap_page(&pdev->dev, buffer_info->dma,
4192			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4193		buffer_info->dma = 0;
4194
4195		length = le16_to_cpu(rx_desc->length);
4196
4197		/* errors is only valid for DD + EOP descriptors */
4198		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4199		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4200			u8 *mapped = page_address(buffer_info->rxbuf.page);
4201
4202			if (e1000_tbi_should_accept(adapter, status,
4203						    rx_desc->errors,
4204						    length, mapped)) {
4205				length--;
4206			} else if (netdev->features & NETIF_F_RXALL) {
4207				goto process_skb;
4208			} else {
4209				/* an error means any chain goes out the window
4210				 * too
4211				 */
4212				if (rx_ring->rx_skb_top)
4213					dev_kfree_skb(rx_ring->rx_skb_top);
4214				rx_ring->rx_skb_top = NULL;
4215				goto next_desc;
4216			}
4217		}
4218
4219#define rxtop rx_ring->rx_skb_top
4220process_skb:
4221		if (!(status & E1000_RXD_STAT_EOP)) {
4222			/* this descriptor is only the beginning (or middle) */
4223			if (!rxtop) {
4224				/* this is the beginning of a chain */
4225				rxtop = napi_get_frags(&adapter->napi);
4226				if (!rxtop)
4227					break;
4228
4229				skb_fill_page_desc(rxtop, 0,
4230						   buffer_info->rxbuf.page,
4231						   0, length);
4232			} else {
4233				/* this is the middle of a chain */
4234				skb_fill_page_desc(rxtop,
4235				    skb_shinfo(rxtop)->nr_frags,
4236				    buffer_info->rxbuf.page, 0, length);
4237			}
4238			e1000_consume_page(buffer_info, rxtop, length);
4239			goto next_desc;
4240		} else {
4241			if (rxtop) {
4242				/* end of the chain */
4243				skb_fill_page_desc(rxtop,
4244				    skb_shinfo(rxtop)->nr_frags,
4245				    buffer_info->rxbuf.page, 0, length);
4246				skb = rxtop;
4247				rxtop = NULL;
4248				e1000_consume_page(buffer_info, skb, length);
4249			} else {
4250				struct page *p;
4251				/* no chain, got EOP, this buf is the packet
4252				 * copybreak to save the put_page/alloc_page
4253				 */
4254				p = buffer_info->rxbuf.page;
4255				if (length <= copybreak) {
4256					u8 *vaddr;
4257
4258					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4259						length -= 4;
4260					skb = e1000_alloc_rx_skb(adapter,
4261								 length);
4262					if (!skb)
4263						break;
4264
4265					vaddr = kmap_atomic(p);
4266					memcpy(skb_tail_pointer(skb), vaddr,
4267					       length);
4268					kunmap_atomic(vaddr);
4269					/* re-use the page, so don't erase
4270					 * buffer_info->rxbuf.page
4271					 */
4272					skb_put(skb, length);
4273					e1000_rx_checksum(adapter,
4274							  status | rx_desc->errors << 24,
4275							  le16_to_cpu(rx_desc->csum), skb);
4276
4277					total_rx_bytes += skb->len;
4278					total_rx_packets++;
4279
4280					e1000_receive_skb(adapter, status,
4281							  rx_desc->special, skb);
4282					goto next_desc;
4283				} else {
4284					skb = napi_get_frags(&adapter->napi);
4285					if (!skb) {
4286						adapter->alloc_rx_buff_failed++;
4287						break;
4288					}
4289					skb_fill_page_desc(skb, 0, p, 0,
4290							   length);
4291					e1000_consume_page(buffer_info, skb,
4292							   length);
4293				}
4294			}
4295		}
4296
4297		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4298		e1000_rx_checksum(adapter,
4299				  (u32)(status) |
4300				  ((u32)(rx_desc->errors) << 24),
4301				  le16_to_cpu(rx_desc->csum), skb);
4302
4303		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4304		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4305			pskb_trim(skb, skb->len - 4);
4306		total_rx_packets++;
4307
4308		if (status & E1000_RXD_STAT_VP) {
4309			__le16 vlan = rx_desc->special;
4310			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4311
4312			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4313		}
4314
4315		napi_gro_frags(&adapter->napi);
4316
4317next_desc:
4318		rx_desc->status = 0;
4319
4320		/* return some buffers to hardware, one at a time is too slow */
4321		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4322			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4323			cleaned_count = 0;
4324		}
4325
4326		/* use prefetched values */
4327		rx_desc = next_rxd;
4328		buffer_info = next_buffer;
4329	}
4330	rx_ring->next_to_clean = i;
4331
4332	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4333	if (cleaned_count)
4334		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4335
4336	adapter->total_rx_packets += total_rx_packets;
4337	adapter->total_rx_bytes += total_rx_bytes;
4338	netdev->stats.rx_bytes += total_rx_bytes;
4339	netdev->stats.rx_packets += total_rx_packets;
4340	return cleaned;
4341}
4342
4343/* this should improve performance for small packets with large amounts
4344 * of reassembly being done in the stack
4345 */
4346static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4347				       struct e1000_rx_buffer *buffer_info,
4348				       u32 length, const void *data)
4349{
4350	struct sk_buff *skb;
4351
4352	if (length > copybreak)
4353		return NULL;
4354
4355	skb = e1000_alloc_rx_skb(adapter, length);
4356	if (!skb)
4357		return NULL;
4358
4359	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4360				length, DMA_FROM_DEVICE);
4361
4362	skb_put_data(skb, data, length);
4363
4364	return skb;
4365}
4366
4367/**
4368 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4369 * @adapter: board private structure
4370 * @rx_ring: ring to clean
4371 * @work_done: amount of napi work completed this call
4372 * @work_to_do: max amount of work allowed for this call to do
4373 */
4374static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4375			       struct e1000_rx_ring *rx_ring,
4376			       int *work_done, int work_to_do)
4377{
4378	struct net_device *netdev = adapter->netdev;
4379	struct pci_dev *pdev = adapter->pdev;
4380	struct e1000_rx_desc *rx_desc, *next_rxd;
4381	struct e1000_rx_buffer *buffer_info, *next_buffer;
4382	u32 length;
4383	unsigned int i;
4384	int cleaned_count = 0;
4385	bool cleaned = false;
4386	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4387
4388	i = rx_ring->next_to_clean;
4389	rx_desc = E1000_RX_DESC(*rx_ring, i);
4390	buffer_info = &rx_ring->buffer_info[i];
4391
4392	while (rx_desc->status & E1000_RXD_STAT_DD) {
4393		struct sk_buff *skb;
4394		u8 *data;
4395		u8 status;
4396
4397		if (*work_done >= work_to_do)
4398			break;
4399		(*work_done)++;
4400		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4401
4402		status = rx_desc->status;
4403		length = le16_to_cpu(rx_desc->length);
4404
4405		data = buffer_info->rxbuf.data;
4406		prefetch(data);
4407		skb = e1000_copybreak(adapter, buffer_info, length, data);
4408		if (!skb) {
4409			unsigned int frag_len = e1000_frag_len(adapter);
4410
4411			skb = build_skb(data - E1000_HEADROOM, frag_len);
4412			if (!skb) {
4413				adapter->alloc_rx_buff_failed++;
4414				break;
4415			}
4416
4417			skb_reserve(skb, E1000_HEADROOM);
4418			dma_unmap_single(&pdev->dev, buffer_info->dma,
4419					 adapter->rx_buffer_len,
4420					 DMA_FROM_DEVICE);
4421			buffer_info->dma = 0;
4422			buffer_info->rxbuf.data = NULL;
4423		}
4424
4425		if (++i == rx_ring->count)
4426			i = 0;
4427
4428		next_rxd = E1000_RX_DESC(*rx_ring, i);
4429		prefetch(next_rxd);
4430
4431		next_buffer = &rx_ring->buffer_info[i];
4432
4433		cleaned = true;
4434		cleaned_count++;
4435
4436		/* !EOP means multiple descriptors were used to store a single
4437		 * packet, if thats the case we need to toss it.  In fact, we
4438		 * to toss every packet with the EOP bit clear and the next
4439		 * frame that _does_ have the EOP bit set, as it is by
4440		 * definition only a frame fragment
4441		 */
4442		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4443			adapter->discarding = true;
4444
4445		if (adapter->discarding) {
4446			/* All receives must fit into a single buffer */
4447			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4448			dev_kfree_skb(skb);
4449			if (status & E1000_RXD_STAT_EOP)
4450				adapter->discarding = false;
4451			goto next_desc;
4452		}
4453
4454		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4455			if (e1000_tbi_should_accept(adapter, status,
4456						    rx_desc->errors,
4457						    length, data)) {
4458				length--;
4459			} else if (netdev->features & NETIF_F_RXALL) {
4460				goto process_skb;
4461			} else {
4462				dev_kfree_skb(skb);
4463				goto next_desc;
4464			}
4465		}
4466
4467process_skb:
4468		total_rx_bytes += (length - 4); /* don't count FCS */
4469		total_rx_packets++;
4470
4471		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4472			/* adjust length to remove Ethernet CRC, this must be
4473			 * done after the TBI_ACCEPT workaround above
4474			 */
4475			length -= 4;
4476
4477		if (buffer_info->rxbuf.data == NULL)
4478			skb_put(skb, length);
4479		else /* copybreak skb */
4480			skb_trim(skb, length);
4481
4482		/* Receive Checksum Offload */
4483		e1000_rx_checksum(adapter,
4484				  (u32)(status) |
4485				  ((u32)(rx_desc->errors) << 24),
4486				  le16_to_cpu(rx_desc->csum), skb);
4487
4488		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4489
4490next_desc:
4491		rx_desc->status = 0;
4492
4493		/* return some buffers to hardware, one at a time is too slow */
4494		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4495			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4496			cleaned_count = 0;
4497		}
4498
4499		/* use prefetched values */
4500		rx_desc = next_rxd;
4501		buffer_info = next_buffer;
4502	}
4503	rx_ring->next_to_clean = i;
4504
4505	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4506	if (cleaned_count)
4507		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4508
4509	adapter->total_rx_packets += total_rx_packets;
4510	adapter->total_rx_bytes += total_rx_bytes;
4511	netdev->stats.rx_bytes += total_rx_bytes;
4512	netdev->stats.rx_packets += total_rx_packets;
4513	return cleaned;
4514}
4515
4516/**
4517 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4518 * @adapter: address of board private structure
4519 * @rx_ring: pointer to receive ring structure
4520 * @cleaned_count: number of buffers to allocate this pass
4521 **/
4522static void
4523e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4524			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4525{
4526	struct pci_dev *pdev = adapter->pdev;
4527	struct e1000_rx_desc *rx_desc;
4528	struct e1000_rx_buffer *buffer_info;
4529	unsigned int i;
4530
4531	i = rx_ring->next_to_use;
4532	buffer_info = &rx_ring->buffer_info[i];
4533
4534	while (cleaned_count--) {
4535		/* allocate a new page if necessary */
4536		if (!buffer_info->rxbuf.page) {
4537			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4538			if (unlikely(!buffer_info->rxbuf.page)) {
4539				adapter->alloc_rx_buff_failed++;
4540				break;
4541			}
4542		}
4543
4544		if (!buffer_info->dma) {
4545			buffer_info->dma = dma_map_page(&pdev->dev,
4546							buffer_info->rxbuf.page, 0,
4547							adapter->rx_buffer_len,
4548							DMA_FROM_DEVICE);
4549			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4550				put_page(buffer_info->rxbuf.page);
4551				buffer_info->rxbuf.page = NULL;
4552				buffer_info->dma = 0;
4553				adapter->alloc_rx_buff_failed++;
4554				break;
4555			}
4556		}
4557
4558		rx_desc = E1000_RX_DESC(*rx_ring, i);
4559		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4560
4561		if (unlikely(++i == rx_ring->count))
4562			i = 0;
4563		buffer_info = &rx_ring->buffer_info[i];
4564	}
4565
4566	if (likely(rx_ring->next_to_use != i)) {
4567		rx_ring->next_to_use = i;
4568		if (unlikely(i-- == 0))
4569			i = (rx_ring->count - 1);
4570
4571		/* Force memory writes to complete before letting h/w
4572		 * know there are new descriptors to fetch.  (Only
4573		 * applicable for weak-ordered memory model archs,
4574		 * such as IA-64).
4575		 */
4576		wmb();
4577		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4578	}
4579}
4580
4581/**
4582 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4583 * @adapter: address of board private structure
 
 
4584 **/
4585static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4586				   struct e1000_rx_ring *rx_ring,
4587				   int cleaned_count)
4588{
4589	struct e1000_hw *hw = &adapter->hw;
4590	struct pci_dev *pdev = adapter->pdev;
4591	struct e1000_rx_desc *rx_desc;
4592	struct e1000_rx_buffer *buffer_info;
4593	unsigned int i;
4594	unsigned int bufsz = adapter->rx_buffer_len;
4595
4596	i = rx_ring->next_to_use;
4597	buffer_info = &rx_ring->buffer_info[i];
4598
4599	while (cleaned_count--) {
4600		void *data;
4601
4602		if (buffer_info->rxbuf.data)
4603			goto skip;
4604
4605		data = e1000_alloc_frag(adapter);
4606		if (!data) {
4607			/* Better luck next round */
4608			adapter->alloc_rx_buff_failed++;
4609			break;
4610		}
4611
4612		/* Fix for errata 23, can't cross 64kB boundary */
4613		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4614			void *olddata = data;
4615			e_err(rx_err, "skb align check failed: %u bytes at "
4616			      "%p\n", bufsz, data);
4617			/* Try again, without freeing the previous */
4618			data = e1000_alloc_frag(adapter);
4619			/* Failed allocation, critical failure */
4620			if (!data) {
4621				skb_free_frag(olddata);
4622				adapter->alloc_rx_buff_failed++;
4623				break;
4624			}
4625
4626			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4627				/* give up */
4628				skb_free_frag(data);
4629				skb_free_frag(olddata);
4630				adapter->alloc_rx_buff_failed++;
4631				break;
4632			}
4633
4634			/* Use new allocation */
4635			skb_free_frag(olddata);
4636		}
4637		buffer_info->dma = dma_map_single(&pdev->dev,
4638						  data,
4639						  adapter->rx_buffer_len,
4640						  DMA_FROM_DEVICE);
4641		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4642			skb_free_frag(data);
4643			buffer_info->dma = 0;
4644			adapter->alloc_rx_buff_failed++;
4645			break;
4646		}
4647
4648		/* XXX if it was allocated cleanly it will never map to a
4649		 * boundary crossing
4650		 */
4651
4652		/* Fix for errata 23, can't cross 64kB boundary */
4653		if (!e1000_check_64k_bound(adapter,
4654					(void *)(unsigned long)buffer_info->dma,
4655					adapter->rx_buffer_len)) {
4656			e_err(rx_err, "dma align check failed: %u bytes at "
4657			      "%p\n", adapter->rx_buffer_len,
4658			      (void *)(unsigned long)buffer_info->dma);
4659
4660			dma_unmap_single(&pdev->dev, buffer_info->dma,
4661					 adapter->rx_buffer_len,
4662					 DMA_FROM_DEVICE);
4663
4664			skb_free_frag(data);
4665			buffer_info->rxbuf.data = NULL;
4666			buffer_info->dma = 0;
4667
4668			adapter->alloc_rx_buff_failed++;
4669			break;
4670		}
4671		buffer_info->rxbuf.data = data;
4672 skip:
4673		rx_desc = E1000_RX_DESC(*rx_ring, i);
4674		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4675
4676		if (unlikely(++i == rx_ring->count))
4677			i = 0;
4678		buffer_info = &rx_ring->buffer_info[i];
4679	}
4680
4681	if (likely(rx_ring->next_to_use != i)) {
4682		rx_ring->next_to_use = i;
4683		if (unlikely(i-- == 0))
4684			i = (rx_ring->count - 1);
4685
4686		/* Force memory writes to complete before letting h/w
4687		 * know there are new descriptors to fetch.  (Only
4688		 * applicable for weak-ordered memory model archs,
4689		 * such as IA-64).
4690		 */
4691		wmb();
4692		writel(i, hw->hw_addr + rx_ring->rdt);
4693	}
4694}
4695
4696/**
4697 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4698 * @adapter:
4699 **/
4700static void e1000_smartspeed(struct e1000_adapter *adapter)
4701{
4702	struct e1000_hw *hw = &adapter->hw;
4703	u16 phy_status;
4704	u16 phy_ctrl;
4705
4706	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4707	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4708		return;
4709
4710	if (adapter->smartspeed == 0) {
4711		/* If Master/Slave config fault is asserted twice,
4712		 * we assume back-to-back
4713		 */
4714		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4715		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4716			return;
4717		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4718		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4719			return;
4720		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4721		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4722			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4723			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4724					    phy_ctrl);
4725			adapter->smartspeed++;
4726			if (!e1000_phy_setup_autoneg(hw) &&
4727			   !e1000_read_phy_reg(hw, PHY_CTRL,
4728					       &phy_ctrl)) {
4729				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4730					     MII_CR_RESTART_AUTO_NEG);
4731				e1000_write_phy_reg(hw, PHY_CTRL,
4732						    phy_ctrl);
4733			}
4734		}
4735		return;
4736	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4737		/* If still no link, perhaps using 2/3 pair cable */
4738		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4739		phy_ctrl |= CR_1000T_MS_ENABLE;
4740		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4741		if (!e1000_phy_setup_autoneg(hw) &&
4742		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4743			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4744				     MII_CR_RESTART_AUTO_NEG);
4745			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4746		}
4747	}
4748	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4749	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4750		adapter->smartspeed = 0;
4751}
4752
4753/**
4754 * e1000_ioctl -
4755 * @netdev:
4756 * @ifreq:
4757 * @cmd:
4758 **/
4759static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4760{
4761	switch (cmd) {
4762	case SIOCGMIIPHY:
4763	case SIOCGMIIREG:
4764	case SIOCSMIIREG:
4765		return e1000_mii_ioctl(netdev, ifr, cmd);
4766	default:
4767		return -EOPNOTSUPP;
4768	}
4769}
4770
4771/**
4772 * e1000_mii_ioctl -
4773 * @netdev:
4774 * @ifreq:
4775 * @cmd:
4776 **/
4777static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4778			   int cmd)
4779{
4780	struct e1000_adapter *adapter = netdev_priv(netdev);
4781	struct e1000_hw *hw = &adapter->hw;
4782	struct mii_ioctl_data *data = if_mii(ifr);
4783	int retval;
4784	u16 mii_reg;
4785	unsigned long flags;
4786
4787	if (hw->media_type != e1000_media_type_copper)
4788		return -EOPNOTSUPP;
4789
4790	switch (cmd) {
4791	case SIOCGMIIPHY:
4792		data->phy_id = hw->phy_addr;
4793		break;
4794	case SIOCGMIIREG:
4795		spin_lock_irqsave(&adapter->stats_lock, flags);
4796		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4797				   &data->val_out)) {
4798			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4799			return -EIO;
4800		}
4801		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4802		break;
4803	case SIOCSMIIREG:
4804		if (data->reg_num & ~(0x1F))
4805			return -EFAULT;
4806		mii_reg = data->val_in;
4807		spin_lock_irqsave(&adapter->stats_lock, flags);
4808		if (e1000_write_phy_reg(hw, data->reg_num,
4809					mii_reg)) {
4810			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4811			return -EIO;
4812		}
4813		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4814		if (hw->media_type == e1000_media_type_copper) {
4815			switch (data->reg_num) {
4816			case PHY_CTRL:
4817				if (mii_reg & MII_CR_POWER_DOWN)
4818					break;
4819				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4820					hw->autoneg = 1;
4821					hw->autoneg_advertised = 0x2F;
4822				} else {
4823					u32 speed;
4824					if (mii_reg & 0x40)
4825						speed = SPEED_1000;
4826					else if (mii_reg & 0x2000)
4827						speed = SPEED_100;
4828					else
4829						speed = SPEED_10;
4830					retval = e1000_set_spd_dplx(
4831						adapter, speed,
4832						((mii_reg & 0x100)
4833						 ? DUPLEX_FULL :
4834						 DUPLEX_HALF));
4835					if (retval)
4836						return retval;
4837				}
4838				if (netif_running(adapter->netdev))
4839					e1000_reinit_locked(adapter);
4840				else
4841					e1000_reset(adapter);
4842				break;
4843			case M88E1000_PHY_SPEC_CTRL:
4844			case M88E1000_EXT_PHY_SPEC_CTRL:
4845				if (e1000_phy_reset(hw))
4846					return -EIO;
4847				break;
4848			}
4849		} else {
4850			switch (data->reg_num) {
4851			case PHY_CTRL:
4852				if (mii_reg & MII_CR_POWER_DOWN)
4853					break;
4854				if (netif_running(adapter->netdev))
4855					e1000_reinit_locked(adapter);
4856				else
4857					e1000_reset(adapter);
4858				break;
4859			}
4860		}
4861		break;
4862	default:
4863		return -EOPNOTSUPP;
4864	}
4865	return E1000_SUCCESS;
4866}
4867
4868void e1000_pci_set_mwi(struct e1000_hw *hw)
4869{
4870	struct e1000_adapter *adapter = hw->back;
4871	int ret_val = pci_set_mwi(adapter->pdev);
4872
4873	if (ret_val)
4874		e_err(probe, "Error in setting MWI\n");
4875}
4876
4877void e1000_pci_clear_mwi(struct e1000_hw *hw)
4878{
4879	struct e1000_adapter *adapter = hw->back;
4880
4881	pci_clear_mwi(adapter->pdev);
4882}
4883
4884int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4885{
4886	struct e1000_adapter *adapter = hw->back;
4887	return pcix_get_mmrbc(adapter->pdev);
4888}
4889
4890void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4891{
4892	struct e1000_adapter *adapter = hw->back;
4893	pcix_set_mmrbc(adapter->pdev, mmrbc);
4894}
4895
4896void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4897{
4898	outl(value, port);
4899}
4900
4901static bool e1000_vlan_used(struct e1000_adapter *adapter)
4902{
4903	u16 vid;
4904
4905	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4906		return true;
4907	return false;
4908}
4909
4910static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4911			      netdev_features_t features)
4912{
4913	struct e1000_hw *hw = &adapter->hw;
4914	u32 ctrl;
4915
4916	ctrl = er32(CTRL);
4917	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4918		/* enable VLAN tag insert/strip */
4919		ctrl |= E1000_CTRL_VME;
4920	} else {
4921		/* disable VLAN tag insert/strip */
4922		ctrl &= ~E1000_CTRL_VME;
4923	}
4924	ew32(CTRL, ctrl);
4925}
4926static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4927				     bool filter_on)
4928{
4929	struct e1000_hw *hw = &adapter->hw;
4930	u32 rctl;
4931
4932	if (!test_bit(__E1000_DOWN, &adapter->flags))
4933		e1000_irq_disable(adapter);
4934
4935	__e1000_vlan_mode(adapter, adapter->netdev->features);
4936	if (filter_on) {
4937		/* enable VLAN receive filtering */
4938		rctl = er32(RCTL);
4939		rctl &= ~E1000_RCTL_CFIEN;
4940		if (!(adapter->netdev->flags & IFF_PROMISC))
4941			rctl |= E1000_RCTL_VFE;
4942		ew32(RCTL, rctl);
4943		e1000_update_mng_vlan(adapter);
4944	} else {
4945		/* disable VLAN receive filtering */
4946		rctl = er32(RCTL);
4947		rctl &= ~E1000_RCTL_VFE;
4948		ew32(RCTL, rctl);
4949	}
4950
4951	if (!test_bit(__E1000_DOWN, &adapter->flags))
4952		e1000_irq_enable(adapter);
4953}
4954
4955static void e1000_vlan_mode(struct net_device *netdev,
4956			    netdev_features_t features)
4957{
4958	struct e1000_adapter *adapter = netdev_priv(netdev);
4959
4960	if (!test_bit(__E1000_DOWN, &adapter->flags))
4961		e1000_irq_disable(adapter);
4962
4963	__e1000_vlan_mode(adapter, features);
4964
4965	if (!test_bit(__E1000_DOWN, &adapter->flags))
4966		e1000_irq_enable(adapter);
4967}
4968
4969static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4970				 __be16 proto, u16 vid)
4971{
4972	struct e1000_adapter *adapter = netdev_priv(netdev);
4973	struct e1000_hw *hw = &adapter->hw;
4974	u32 vfta, index;
4975
4976	if ((hw->mng_cookie.status &
4977	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4978	    (vid == adapter->mng_vlan_id))
4979		return 0;
4980
4981	if (!e1000_vlan_used(adapter))
4982		e1000_vlan_filter_on_off(adapter, true);
4983
4984	/* add VID to filter table */
4985	index = (vid >> 5) & 0x7F;
4986	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4987	vfta |= (1 << (vid & 0x1F));
4988	e1000_write_vfta(hw, index, vfta);
4989
4990	set_bit(vid, adapter->active_vlans);
4991
4992	return 0;
4993}
4994
4995static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4996				  __be16 proto, u16 vid)
4997{
4998	struct e1000_adapter *adapter = netdev_priv(netdev);
4999	struct e1000_hw *hw = &adapter->hw;
5000	u32 vfta, index;
5001
5002	if (!test_bit(__E1000_DOWN, &adapter->flags))
5003		e1000_irq_disable(adapter);
5004	if (!test_bit(__E1000_DOWN, &adapter->flags))
5005		e1000_irq_enable(adapter);
5006
5007	/* remove VID from filter table */
5008	index = (vid >> 5) & 0x7F;
5009	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5010	vfta &= ~(1 << (vid & 0x1F));
5011	e1000_write_vfta(hw, index, vfta);
5012
5013	clear_bit(vid, adapter->active_vlans);
5014
5015	if (!e1000_vlan_used(adapter))
5016		e1000_vlan_filter_on_off(adapter, false);
5017
5018	return 0;
5019}
5020
5021static void e1000_restore_vlan(struct e1000_adapter *adapter)
5022{
5023	u16 vid;
5024
5025	if (!e1000_vlan_used(adapter))
5026		return;
5027
5028	e1000_vlan_filter_on_off(adapter, true);
5029	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5030		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5031}
5032
5033int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5034{
5035	struct e1000_hw *hw = &adapter->hw;
5036
5037	hw->autoneg = 0;
5038
5039	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5040	 * for the switch() below to work
5041	 */
5042	if ((spd & 1) || (dplx & ~1))
5043		goto err_inval;
5044
5045	/* Fiber NICs only allow 1000 gbps Full duplex */
5046	if ((hw->media_type == e1000_media_type_fiber) &&
5047	    spd != SPEED_1000 &&
5048	    dplx != DUPLEX_FULL)
5049		goto err_inval;
5050
5051	switch (spd + dplx) {
5052	case SPEED_10 + DUPLEX_HALF:
5053		hw->forced_speed_duplex = e1000_10_half;
5054		break;
5055	case SPEED_10 + DUPLEX_FULL:
5056		hw->forced_speed_duplex = e1000_10_full;
5057		break;
5058	case SPEED_100 + DUPLEX_HALF:
5059		hw->forced_speed_duplex = e1000_100_half;
5060		break;
5061	case SPEED_100 + DUPLEX_FULL:
5062		hw->forced_speed_duplex = e1000_100_full;
5063		break;
5064	case SPEED_1000 + DUPLEX_FULL:
5065		hw->autoneg = 1;
5066		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5067		break;
5068	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5069	default:
5070		goto err_inval;
5071	}
5072
5073	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5074	hw->mdix = AUTO_ALL_MODES;
5075
5076	return 0;
5077
5078err_inval:
5079	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5080	return -EINVAL;
5081}
5082
5083static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5084{
5085	struct net_device *netdev = pci_get_drvdata(pdev);
5086	struct e1000_adapter *adapter = netdev_priv(netdev);
5087	struct e1000_hw *hw = &adapter->hw;
5088	u32 ctrl, ctrl_ext, rctl, status;
5089	u32 wufc = adapter->wol;
5090#ifdef CONFIG_PM
5091	int retval = 0;
5092#endif
5093
5094	netif_device_detach(netdev);
5095
5096	if (netif_running(netdev)) {
5097		int count = E1000_CHECK_RESET_COUNT;
5098
5099		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5100			usleep_range(10000, 20000);
5101
5102		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
 
5103		e1000_down(adapter);
 
5104	}
5105
5106#ifdef CONFIG_PM
5107	retval = pci_save_state(pdev);
5108	if (retval)
5109		return retval;
5110#endif
5111
5112	status = er32(STATUS);
5113	if (status & E1000_STATUS_LU)
5114		wufc &= ~E1000_WUFC_LNKC;
5115
5116	if (wufc) {
5117		e1000_setup_rctl(adapter);
5118		e1000_set_rx_mode(netdev);
5119
5120		rctl = er32(RCTL);
5121
5122		/* turn on all-multi mode if wake on multicast is enabled */
5123		if (wufc & E1000_WUFC_MC)
5124			rctl |= E1000_RCTL_MPE;
5125
5126		/* enable receives in the hardware */
5127		ew32(RCTL, rctl | E1000_RCTL_EN);
5128
5129		if (hw->mac_type >= e1000_82540) {
5130			ctrl = er32(CTRL);
5131			/* advertise wake from D3Cold */
5132			#define E1000_CTRL_ADVD3WUC 0x00100000
5133			/* phy power management enable */
5134			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5135			ctrl |= E1000_CTRL_ADVD3WUC |
5136				E1000_CTRL_EN_PHY_PWR_MGMT;
5137			ew32(CTRL, ctrl);
5138		}
5139
5140		if (hw->media_type == e1000_media_type_fiber ||
5141		    hw->media_type == e1000_media_type_internal_serdes) {
5142			/* keep the laser running in D3 */
5143			ctrl_ext = er32(CTRL_EXT);
5144			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5145			ew32(CTRL_EXT, ctrl_ext);
5146		}
5147
5148		ew32(WUC, E1000_WUC_PME_EN);
5149		ew32(WUFC, wufc);
5150	} else {
5151		ew32(WUC, 0);
5152		ew32(WUFC, 0);
5153	}
5154
5155	e1000_release_manageability(adapter);
5156
5157	*enable_wake = !!wufc;
5158
5159	/* make sure adapter isn't asleep if manageability is enabled */
5160	if (adapter->en_mng_pt)
5161		*enable_wake = true;
5162
5163	if (netif_running(netdev))
5164		e1000_free_irq(adapter);
5165
5166	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5167		pci_disable_device(pdev);
5168
5169	return 0;
5170}
5171
5172#ifdef CONFIG_PM
5173static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5174{
5175	int retval;
 
5176	bool wake;
5177
5178	retval = __e1000_shutdown(pdev, &wake);
5179	if (retval)
5180		return retval;
5181
5182	if (wake) {
5183		pci_prepare_to_sleep(pdev);
5184	} else {
5185		pci_wake_from_d3(pdev, false);
5186		pci_set_power_state(pdev, PCI_D3hot);
5187	}
5188
5189	return 0;
5190}
5191
5192static int e1000_resume(struct pci_dev *pdev)
5193{
 
5194	struct net_device *netdev = pci_get_drvdata(pdev);
5195	struct e1000_adapter *adapter = netdev_priv(netdev);
5196	struct e1000_hw *hw = &adapter->hw;
5197	u32 err;
5198
5199	pci_set_power_state(pdev, PCI_D0);
5200	pci_restore_state(pdev);
5201	pci_save_state(pdev);
5202
5203	if (adapter->need_ioport)
5204		err = pci_enable_device(pdev);
5205	else
5206		err = pci_enable_device_mem(pdev);
5207	if (err) {
5208		pr_err("Cannot enable PCI device from suspend\n");
5209		return err;
5210	}
5211
5212	/* flush memory to make sure state is correct */
5213	smp_mb__before_atomic();
5214	clear_bit(__E1000_DISABLED, &adapter->flags);
5215	pci_set_master(pdev);
5216
5217	pci_enable_wake(pdev, PCI_D3hot, 0);
5218	pci_enable_wake(pdev, PCI_D3cold, 0);
5219
5220	if (netif_running(netdev)) {
5221		err = e1000_request_irq(adapter);
5222		if (err)
5223			return err;
5224	}
5225
5226	e1000_power_up_phy(adapter);
5227	e1000_reset(adapter);
5228	ew32(WUS, ~0);
5229
5230	e1000_init_manageability(adapter);
5231
5232	if (netif_running(netdev))
5233		e1000_up(adapter);
5234
5235	netif_device_attach(netdev);
5236
5237	return 0;
5238}
5239#endif
5240
5241static void e1000_shutdown(struct pci_dev *pdev)
5242{
5243	bool wake;
5244
5245	__e1000_shutdown(pdev, &wake);
5246
5247	if (system_state == SYSTEM_POWER_OFF) {
5248		pci_wake_from_d3(pdev, wake);
5249		pci_set_power_state(pdev, PCI_D3hot);
5250	}
5251}
5252
5253#ifdef CONFIG_NET_POLL_CONTROLLER
5254/* Polling 'interrupt' - used by things like netconsole to send skbs
5255 * without having to re-enable interrupts. It's not called while
5256 * the interrupt routine is executing.
5257 */
5258static void e1000_netpoll(struct net_device *netdev)
5259{
5260	struct e1000_adapter *adapter = netdev_priv(netdev);
5261
5262	if (disable_hardirq(adapter->pdev->irq))
5263		e1000_intr(adapter->pdev->irq, netdev);
5264	enable_irq(adapter->pdev->irq);
5265}
5266#endif
5267
5268/**
5269 * e1000_io_error_detected - called when PCI error is detected
5270 * @pdev: Pointer to PCI device
5271 * @state: The current pci connection state
5272 *
5273 * This function is called after a PCI bus error affecting
5274 * this device has been detected.
5275 */
5276static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5277						pci_channel_state_t state)
5278{
5279	struct net_device *netdev = pci_get_drvdata(pdev);
5280	struct e1000_adapter *adapter = netdev_priv(netdev);
5281
 
5282	netif_device_detach(netdev);
5283
5284	if (state == pci_channel_io_perm_failure)
 
5285		return PCI_ERS_RESULT_DISCONNECT;
 
5286
5287	if (netif_running(netdev))
5288		e1000_down(adapter);
5289
5290	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5291		pci_disable_device(pdev);
 
5292
5293	/* Request a slot slot reset. */
5294	return PCI_ERS_RESULT_NEED_RESET;
5295}
5296
5297/**
5298 * e1000_io_slot_reset - called after the pci bus has been reset.
5299 * @pdev: Pointer to PCI device
5300 *
5301 * Restart the card from scratch, as if from a cold-boot. Implementation
5302 * resembles the first-half of the e1000_resume routine.
5303 */
5304static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5305{
5306	struct net_device *netdev = pci_get_drvdata(pdev);
5307	struct e1000_adapter *adapter = netdev_priv(netdev);
5308	struct e1000_hw *hw = &adapter->hw;
5309	int err;
5310
5311	if (adapter->need_ioport)
5312		err = pci_enable_device(pdev);
5313	else
5314		err = pci_enable_device_mem(pdev);
5315	if (err) {
5316		pr_err("Cannot re-enable PCI device after reset.\n");
5317		return PCI_ERS_RESULT_DISCONNECT;
5318	}
5319
5320	/* flush memory to make sure state is correct */
5321	smp_mb__before_atomic();
5322	clear_bit(__E1000_DISABLED, &adapter->flags);
5323	pci_set_master(pdev);
5324
5325	pci_enable_wake(pdev, PCI_D3hot, 0);
5326	pci_enable_wake(pdev, PCI_D3cold, 0);
5327
5328	e1000_reset(adapter);
5329	ew32(WUS, ~0);
5330
5331	return PCI_ERS_RESULT_RECOVERED;
5332}
5333
5334/**
5335 * e1000_io_resume - called when traffic can start flowing again.
5336 * @pdev: Pointer to PCI device
5337 *
5338 * This callback is called when the error recovery driver tells us that
5339 * its OK to resume normal operation. Implementation resembles the
5340 * second-half of the e1000_resume routine.
5341 */
5342static void e1000_io_resume(struct pci_dev *pdev)
5343{
5344	struct net_device *netdev = pci_get_drvdata(pdev);
5345	struct e1000_adapter *adapter = netdev_priv(netdev);
5346
5347	e1000_init_manageability(adapter);
5348
5349	if (netif_running(netdev)) {
5350		if (e1000_up(adapter)) {
5351			pr_info("can't bring device back up after reset\n");
5352			return;
5353		}
5354	}
5355
5356	netif_device_attach(netdev);
5357}
5358
5359/* e1000_main.c */