// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
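/* Deliberately empty Rx buffer allocator: it shares the signature of
 * e1000_alloc_rx_buffers()/e1000_alloc_jumbo_rx_buffers() below, so it
 * can stand in wherever adapter->alloc_rx_buf must remain callable but
 * should do nothing.
 */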
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int __maybe_unused e1000_suspend(struct device *dev);
static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

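/* Per the description above, received packets no larger than copybreak
 * bytes are copied into a fresh skb so the full-sized Rx buffer can be
 * recycled; 0 disables copying entirely. With perms 0644 the value can
 * also be changed at runtime (illustrative):
 *
 *   echo 128 > /sys/module/e1000/parameters/copybreak
 */
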
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s\n", e1000_driver_string);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				   "packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

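/* Attach the driver's interrupt handler to the adapter's single legacy
 * (INTx) vector; the line may be shared with other devices, hence
 * IRQF_SHARED. This driver has no MSI/MSI-X path.
 */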
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

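/* Keep the VLAN ID used by the management firmware (carried in the
 * DHCP cookie) registered in the VLAN filter, and retire the
 * previously tracked ID once it is no longer needed by anyone.
 */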
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

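/* While the driver owns the hardware it handles ARP itself, so stop
 * the management firmware from intercepting ARP frames;
 * e1000_release_manageability() restores interception on teardown.
 */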
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

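/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Assumes Tx/Rx resources were already allocated (see e1000_open());
 * reprograms the hardware, re-enables NAPI and interrupts, then kicks
 * the watchdog by firing a software link-status-change interrupt.
 **/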
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down.
	 * The PHY cannot be powered down if any of the following is true:
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

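/**
 * e1000_down - quiesce the adapter and bring the interface down
 * @adapter: board private structure
 *
 * Disables Rx before Tx in hardware, flushes both disables, then takes
 * down carrier, NAPI and interrupts before cancelling deferred work
 * and cleaning the rings; see the inline comments for the ordering
 * constraints.
 **/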
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result in transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

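/* Serialized down/up cycle: the __E1000_RESETTING bit doubles as a
 * lock, so concurrent callers (e.g. the reset task or an MTU change)
 * spin in msleep() until the current cycle finishes.
 */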
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but doesn't include the Ethernet FCS because hardware
		 * appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

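	/* Worked example with assumed values: pba = 48 (KB) makes
	 * pba << 10 = 49152 bytes, 90% of which is 44236; with
	 * max_frame_size = 1522, 49152 - 1522 = 47630, so hwm = 44236
	 * and the & 0xFFF8 below rounds fc_high_water down to 44232.
	 */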
	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

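	/* A sane EEPROM's first words, up to and including the checksum
	 * word at EEPROM_CHECKSUM_REG, sum to EEPROM_SUM; recompute what
	 * the checksum word would have to be and show it next to the
	 * value actually stored.
	 */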
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

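/* ndo_set_features callback: VLAN stripping is toggled directly, while
 * RXCSUM/RXALL changes only take effect after the hardware is
 * reprogrammed, via a full down/up cycle when the interface is running
 * or a bare reset otherwise.
 */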
static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				   NETIF_F_HW_CSUM |
				   NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	   (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
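	/* (ETH_ZLEN - ETH_HLEN = 60 - 14 = 46 bytes; the upper bound is
	 * the jumbo frame ceiling minus the 14-byte Ethernet header and
	 * 4-byte FCS, i.e. 16110 bytes)
	 */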

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");


	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
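	/* Illustrative check: begin = 0x1F000 and len = 0x2000 give
	 * end - 1 = 0x20FFF; begin ^ (end - 1) = 0x3FFFF, whose high
	 * bits survive the >> 16, so the buffer straddles a 64 kB
	 * boundary and the test below correctly reports failure.
	 */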
1482	if (hw->mac_type == e1000_82545 ||
1483	    hw->mac_type == e1000_ce4100 ||
1484	    hw->mac_type == e1000_82546) {
1485		return ((begin ^ (end - 1)) >> 16) == 0;
1486	}
1487
1488	return true;
1489}
1490
1491/**
1492 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1493 * @adapter: board private structure
1494 * @txdr:    tx descriptor ring (for a specific queue) to setup
1495 *
1496 * Return 0 on success, negative on failure
1497 **/
1498static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1499				    struct e1000_tx_ring *txdr)
1500{
1501	struct pci_dev *pdev = adapter->pdev;
1502	int size;
1503
1504	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1505	txdr->buffer_info = vzalloc(size);
1506	if (!txdr->buffer_info)
1507		return -ENOMEM;
1508
1509	/* round up to nearest 4K */
1510
1511	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1512	txdr->size = ALIGN(txdr->size, 4096);
1513
1514	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1515					GFP_KERNEL);
1516	if (!txdr->desc) {
1517setup_tx_desc_die:
1518		vfree(txdr->buffer_info);
1519		return -ENOMEM;
1520	}
1521
1522	/* Fix for errata 23, can't cross 64kB boundary */
1523	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1524		void *olddesc = txdr->desc;
1525		dma_addr_t olddma = txdr->dma;
1526		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1527		      txdr->size, txdr->desc);
1528		/* Try again, without freeing the previous */
1529		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1530						&txdr->dma, GFP_KERNEL);
1531		/* Failed allocation, critical failure */
1532		if (!txdr->desc) {
1533			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1534					  olddma);
1535			goto setup_tx_desc_die;
1536		}
1537
1538		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1539			/* give up */
1540			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1541					  txdr->dma);
1542			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1543					  olddma);
1544			e_err(probe, "Unable to allocate aligned memory "
1545			      "for the transmit descriptor ring\n");
1546			vfree(txdr->buffer_info);
1547			return -ENOMEM;
1548		} else {
1549			/* Free old allocation, new allocation was successful */
1550			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551					  olddma);
1552		}
1553	}
1554	memset(txdr->desc, 0, txdr->size);
1555
1556	txdr->next_to_use = 0;
1557	txdr->next_to_clean = 0;
1558
1559	return 0;
1560}
1561
1562/**
1563 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1564 * 				  (Descriptors) for all queues
1565 * @adapter: board private structure
1566 *
1567 * Return 0 on success, negative on failure
1568 **/
1569int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1570{
1571	int i, err = 0;
1572
1573	for (i = 0; i < adapter->num_tx_queues; i++) {
1574		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1575		if (err) {
1576			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1577			for (i-- ; i >= 0; i--)
1578				e1000_free_tx_resources(adapter,
1579							&adapter->tx_ring[i]);
1580			break;
1581		}
1582	}
1583
1584	return err;
1585}
1586
1587/**
1588 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1589 * @adapter: board private structure
1590 *
1591 * Configure the Tx unit of the MAC after a reset.
1592 **/
1593static void e1000_configure_tx(struct e1000_adapter *adapter)
1594{
1595	u64 tdba;
1596	struct e1000_hw *hw = &adapter->hw;
1597	u32 tdlen, tctl, tipg;
1598	u32 ipgr1, ipgr2;
1599
1600	/* Setup the HW Tx Head and Tail descriptor pointers */
1601
1602	switch (adapter->num_tx_queues) {
1603	case 1:
1604	default:
1605		tdba = adapter->tx_ring[0].dma;
1606		tdlen = adapter->tx_ring[0].count *
1607			sizeof(struct e1000_tx_desc);
1608		ew32(TDLEN, tdlen);
1609		ew32(TDBAH, (tdba >> 32));
1610		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1611		ew32(TDT, 0);
1612		ew32(TDH, 0);
1613		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1614					   E1000_TDH : E1000_82542_TDH);
1615		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1616					   E1000_TDT : E1000_82542_TDT);
1617		break;
1618	}
1619
1620	/* Set the default values for the Tx Inter Packet Gap timer */
1621	if ((hw->media_type == e1000_media_type_fiber ||
1622	     hw->media_type == e1000_media_type_internal_serdes))
1623		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1624	else
1625		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1626
1627	switch (hw->mac_type) {
1628	case e1000_82542_rev2_0:
1629	case e1000_82542_rev2_1:
1630		tipg = DEFAULT_82542_TIPG_IPGT;
1631		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1632		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1633		break;
1634	default:
1635		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1636		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1637		break;
1638	}
1639	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1640	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1641	ew32(TIPG, tipg);
1642
1643	/* Set the Tx Interrupt Delay register */
1644
1645	ew32(TIDV, adapter->tx_int_delay);
1646	if (hw->mac_type >= e1000_82540)
1647		ew32(TADV, adapter->tx_abs_int_delay);
1648
1649	/* Program the Transmit Control Register */
1650
1651	tctl = er32(TCTL);
1652	tctl &= ~E1000_TCTL_CT;
1653	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1654		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1655
1656	e1000_config_collision_dist(hw);
1657
1658	/* Setup Transmit Descriptor Settings for eop descriptor */
1659	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1660
1661	/* only set IDE if we are delaying interrupts using the timers */
1662	if (adapter->tx_int_delay)
1663		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1664
1665	if (hw->mac_type < e1000_82543)
1666		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1667	else
1668		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1669
1670	/* Cache if we're 82544 running in PCI-X because we'll
1671	 * need this to apply a workaround later in the send path.
1672	 */
1673	if (hw->mac_type == e1000_82544 &&
1674	    hw->bus_type == e1000_bus_type_pcix)
1675		adapter->pcix_82544 = true;
1676
1677	ew32(TCTL, tctl);
1678
1679}
1680
1681/**
1682 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1683 * @adapter: board private structure
1684 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1685 *
1686 * Returns 0 on success, negative on failure
1687 **/
1688static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1689				    struct e1000_rx_ring *rxdr)
1690{
1691	struct pci_dev *pdev = adapter->pdev;
1692	int size, desc_len;
1693
1694	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1695	rxdr->buffer_info = vzalloc(size);
1696	if (!rxdr->buffer_info)
1697		return -ENOMEM;
1698
1699	desc_len = sizeof(struct e1000_rx_desc);
1700
1701	/* Round up to nearest 4K */
1702
1703	rxdr->size = rxdr->count * desc_len;
1704	rxdr->size = ALIGN(rxdr->size, 4096);
1705
1706	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1707					GFP_KERNEL);
1708	if (!rxdr->desc) {
1709setup_rx_desc_die:
1710		vfree(rxdr->buffer_info);
1711		return -ENOMEM;
1712	}
1713
1714	/* Fix for errata 23, can't cross 64kB boundary */
1715	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1716		void *olddesc = rxdr->desc;
1717		dma_addr_t olddma = rxdr->dma;
1718		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1719		      rxdr->size, rxdr->desc);
1720		/* Try again, without freeing the previous */
1721		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1722						&rxdr->dma, GFP_KERNEL);
1723		/* Failed allocation, critical failure */
1724		if (!rxdr->desc) {
1725			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1726					  olddma);
1727			goto setup_rx_desc_die;
1728		}
1729
1730		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731			/* give up */
1732			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1733					  rxdr->dma);
1734			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735					  olddma);
1736			e_err(probe, "Unable to allocate aligned memory for "
1737			      "the Rx descriptor ring\n");
1738			goto setup_rx_desc_die;
1739		} else {
1740			/* Free old allocation, new allocation was successful */
1741			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742					  olddma);
1743		}
1744	}
1745	memset(rxdr->desc, 0, rxdr->size);
1746
1747	rxdr->next_to_clean = 0;
1748	rxdr->next_to_use = 0;
1749	rxdr->rx_skb_top = NULL;
1750
1751	return 0;
1752}
1753
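/* Sketch of the errata-23 constraint handled above (what
 * e1000_check_64k_bound() verifies is an assumption here): the ring
 * must not straddle a 64 KiB boundary, i.e. its first and last bytes
 * must agree in every address bit above bit 15. A hypothetical check:
 */
static inline bool e1000_example_crosses_64k(unsigned long start,
					     unsigned long len)
{
	/* compare the 64 KiB "page" of the first and last byte */
	return (start >> 16) != ((start + len - 1) >> 16);
}
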
1754/**
1755 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1756 * 				  (Descriptors) for all queues
1757 * @adapter: board private structure
1758 *
1759 * Return 0 on success, negative on failure
1760 **/
1761int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762{
1763	int i, err = 0;
1764
1765	for (i = 0; i < adapter->num_rx_queues; i++) {
1766		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767		if (err) {
1768			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769			for (i--; i >= 0; i--)
1770				e1000_free_rx_resources(adapter,
1771							&adapter->rx_ring[i]);
1772			break;
1773		}
1774	}
1775
1776	return err;
1777}
1778
1779/**
1780 * e1000_setup_rctl - configure the receive control registers
1781 * @adapter: Board private structure
1782 **/
1783static void e1000_setup_rctl(struct e1000_adapter *adapter)
1784{
1785	struct e1000_hw *hw = &adapter->hw;
1786	u32 rctl;
1787
1788	rctl = er32(RCTL);
1789
1790	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1791
1792	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1793		E1000_RCTL_RDMTS_HALF |
1794		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1795
1796	if (hw->tbi_compatibility_on == 1)
1797		rctl |= E1000_RCTL_SBP;
1798	else
1799		rctl &= ~E1000_RCTL_SBP;
1800
1801	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1802		rctl &= ~E1000_RCTL_LPE;
1803	else
1804		rctl |= E1000_RCTL_LPE;
1805
1806	/* Setup buffer sizes */
1807	rctl &= ~E1000_RCTL_SZ_4096;
1808	rctl |= E1000_RCTL_BSEX;
1809	switch (adapter->rx_buffer_len) {
1810	case E1000_RXBUFFER_2048:
1811	default:
1812		rctl |= E1000_RCTL_SZ_2048;
1813		rctl &= ~E1000_RCTL_BSEX;
1814		break;
1815	case E1000_RXBUFFER_4096:
1816		rctl |= E1000_RCTL_SZ_4096;
1817		break;
1818	case E1000_RXBUFFER_8192:
1819		rctl |= E1000_RCTL_SZ_8192;
1820		break;
1821	case E1000_RXBUFFER_16384:
1822		rctl |= E1000_RCTL_SZ_16384;
1823		break;
1824	}
1825
1826	/* This is useful for sniffing bad packets. */
1827	if (adapter->netdev->features & NETIF_F_RXALL) {
1828		/* UPE and MPE will be handled by normal PROMISC logic
1829		 * in e1000_set_rx_mode
1830		 */
1831		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1832			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1833			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1834
1835		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1836			  E1000_RCTL_DPF | /* Allow filtered pause */
1837			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1838		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1839		 * and that breaks VLANs.
1840		 */
1841	}
1842
1843	ew32(RCTL, rctl);
1844}
1845
1846/**
1847 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1848 * @adapter: board private structure
1849 *
1850 * Configure the Rx unit of the MAC after a reset.
1851 **/
1852static void e1000_configure_rx(struct e1000_adapter *adapter)
1853{
1854	u64 rdba;
1855	struct e1000_hw *hw = &adapter->hw;
1856	u32 rdlen, rctl, rxcsum;
1857
1858	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859		rdlen = adapter->rx_ring[0].count *
1860			sizeof(struct e1000_rx_desc);
1861		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863	} else {
1864		rdlen = adapter->rx_ring[0].count *
1865			sizeof(struct e1000_rx_desc);
1866		adapter->clean_rx = e1000_clean_rx_irq;
1867		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868	}
1869
1870	/* disable receives while setting up the descriptors */
1871	rctl = er32(RCTL);
1872	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873
1874	/* set the Receive Delay Timer Register */
1875	ew32(RDTR, adapter->rx_int_delay);
1876
1877	if (hw->mac_type >= e1000_82540) {
1878		ew32(RADV, adapter->rx_abs_int_delay);
1879		if (adapter->itr_setting != 0)
1880			ew32(ITR, 1000000000 / (adapter->itr * 256));
1881	}
1882
1883	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1884	 * the Base and Length of the Rx Descriptor Ring
1885	 */
1886	switch (adapter->num_rx_queues) {
1887	case 1:
1888	default:
1889		rdba = adapter->rx_ring[0].dma;
1890		ew32(RDLEN, rdlen);
1891		ew32(RDBAH, (rdba >> 32));
1892		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893		ew32(RDT, 0);
1894		ew32(RDH, 0);
1895		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896					   E1000_RDH : E1000_82542_RDH);
1897		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898					   E1000_RDT : E1000_82542_RDT);
1899		break;
1900	}
1901
1902	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1903	if (hw->mac_type >= e1000_82543) {
1904		rxcsum = er32(RXCSUM);
1905		if (adapter->rx_csum)
1906			rxcsum |= E1000_RXCSUM_TUOFL;
1907		else
1908			/* don't need to clear IPPCSE as it defaults to 0 */
1909			rxcsum &= ~E1000_RXCSUM_TUOFL;
1910		ew32(RXCSUM, rxcsum);
1911	}
1912
1913	/* Enable Receives */
1914	ew32(RCTL, rctl | E1000_RCTL_EN);
1915}
1916
1917/**
1918 * e1000_free_tx_resources - Free Tx Resources per Queue
1919 * @adapter: board private structure
1920 * @tx_ring: Tx descriptor ring for a specific queue
1921 *
1922 * Free all transmit software resources
1923 **/
1924static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925				    struct e1000_tx_ring *tx_ring)
1926{
1927	struct pci_dev *pdev = adapter->pdev;
1928
1929	e1000_clean_tx_ring(adapter, tx_ring);
1930
1931	vfree(tx_ring->buffer_info);
1932	tx_ring->buffer_info = NULL;
1933
1934	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935			  tx_ring->dma);
1936
1937	tx_ring->desc = NULL;
1938}
1939
1940/**
1941 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1942 * @adapter: board private structure
1943 *
1944 * Free all transmit software resources
1945 **/
1946void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947{
1948	int i;
1949
1950	for (i = 0; i < adapter->num_tx_queues; i++)
1951		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952}
1953
1954static void
1955e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956				 struct e1000_tx_buffer *buffer_info,
1957				 int budget)
1958{
1959	if (buffer_info->dma) {
1960		if (buffer_info->mapped_as_page)
1961			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1962				       buffer_info->length, DMA_TO_DEVICE);
1963		else
1964			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1965					 buffer_info->length,
1966					 DMA_TO_DEVICE);
1967		buffer_info->dma = 0;
1968	}
1969	if (buffer_info->skb) {
1970		napi_consume_skb(buffer_info->skb, budget);
1971		buffer_info->skb = NULL;
1972	}
1973	buffer_info->time_stamp = 0;
1974	/* buffer_info must be completely set up in the transmit path */
1975}
1976
1977/**
1978 * e1000_clean_tx_ring - Free Tx Buffers
1979 * @adapter: board private structure
1980 * @tx_ring: ring to be cleaned
1981 **/
1982static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983				struct e1000_tx_ring *tx_ring)
1984{
1985	struct e1000_hw *hw = &adapter->hw;
1986	struct e1000_tx_buffer *buffer_info;
1987	unsigned long size;
1988	unsigned int i;
1989
1990	/* Free all the Tx ring sk_buffs */
1991
1992	for (i = 0; i < tx_ring->count; i++) {
1993		buffer_info = &tx_ring->buffer_info[i];
1994		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1995	}
1996
1997	netdev_reset_queue(adapter->netdev);
1998	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1999	memset(tx_ring->buffer_info, 0, size);
2000
2001	/* Zero out the descriptor ring */
2002
2003	memset(tx_ring->desc, 0, tx_ring->size);
2004
2005	tx_ring->next_to_use = 0;
2006	tx_ring->next_to_clean = 0;
2007	tx_ring->last_tx_tso = false;
2008
2009	writel(0, hw->hw_addr + tx_ring->tdh);
2010	writel(0, hw->hw_addr + tx_ring->tdt);
2011}
2012
2013/**
2014 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2015 * @adapter: board private structure
2016 **/
2017static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018{
2019	int i;
2020
2021	for (i = 0; i < adapter->num_tx_queues; i++)
2022		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023}
2024
2025/**
2026 * e1000_free_rx_resources - Free Rx Resources
2027 * @adapter: board private structure
2028 * @rx_ring: ring to clean the resources from
2029 *
2030 * Free all receive software resources
2031 **/
2032static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033				    struct e1000_rx_ring *rx_ring)
2034{
2035	struct pci_dev *pdev = adapter->pdev;
2036
2037	e1000_clean_rx_ring(adapter, rx_ring);
2038
2039	vfree(rx_ring->buffer_info);
2040	rx_ring->buffer_info = NULL;
2041
2042	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043			  rx_ring->dma);
2044
2045	rx_ring->desc = NULL;
2046}
2047
2048/**
2049 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2050 * @adapter: board private structure
2051 *
2052 * Free all receive software resources
2053 **/
2054void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2055{
2056	int i;
2057
2058	for (i = 0; i < adapter->num_rx_queues; i++)
2059		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060}
2061
2062#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2063static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2064{
2065	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2066		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2067}
2068
2069static void *e1000_alloc_frag(const struct e1000_adapter *a)
2070{
2071	unsigned int len = e1000_frag_len(a);
2072	u8 *data = netdev_alloc_frag(len);
2073
2074	if (likely(data))
2075		data += E1000_HEADROOM;
2076	return data;
2077}
2078
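/* Worked example for the two helpers above (NET_SKB_PAD is
 * arch-dependent, so the exact numbers vary): with rx_buffer_len = 2048,
 * e1000_frag_len() requests SKB_DATA_ALIGN(2048 + E1000_HEADROOM) plus
 * the aligned skb_shared_info footer, and e1000_alloc_frag() returns the
 * address just past the headroom so the NIC DMAs into an IP-aligned
 * payload.
 */
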
2079/**
2080 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2081 * @adapter: board private structure
2082 * @rx_ring: ring to free buffers from
2083 **/
2084static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2085				struct e1000_rx_ring *rx_ring)
2086{
2087	struct e1000_hw *hw = &adapter->hw;
2088	struct e1000_rx_buffer *buffer_info;
2089	struct pci_dev *pdev = adapter->pdev;
2090	unsigned long size;
2091	unsigned int i;
2092
2093	/* Free all the Rx netfrags */
2094	for (i = 0; i < rx_ring->count; i++) {
2095		buffer_info = &rx_ring->buffer_info[i];
2096		if (adapter->clean_rx == e1000_clean_rx_irq) {
2097			if (buffer_info->dma)
2098				dma_unmap_single(&pdev->dev, buffer_info->dma,
2099						 adapter->rx_buffer_len,
2100						 DMA_FROM_DEVICE);
2101			if (buffer_info->rxbuf.data) {
2102				skb_free_frag(buffer_info->rxbuf.data);
2103				buffer_info->rxbuf.data = NULL;
2104			}
2105		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2106			if (buffer_info->dma)
2107				dma_unmap_page(&pdev->dev, buffer_info->dma,
2108					       adapter->rx_buffer_len,
2109					       DMA_FROM_DEVICE);
2110			if (buffer_info->rxbuf.page) {
2111				put_page(buffer_info->rxbuf.page);
2112				buffer_info->rxbuf.page = NULL;
2113			}
2114		}
2115
2116		buffer_info->dma = 0;
2117	}
2118
2119	/* there also may be some cached data from a chained receive */
2120	napi_free_frags(&adapter->napi);
2121	rx_ring->rx_skb_top = NULL;
2122
2123	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2124	memset(rx_ring->buffer_info, 0, size);
2125
2126	/* Zero out the descriptor ring */
2127	memset(rx_ring->desc, 0, rx_ring->size);
2128
2129	rx_ring->next_to_clean = 0;
2130	rx_ring->next_to_use = 0;
2131
2132	writel(0, hw->hw_addr + rx_ring->rdh);
2133	writel(0, hw->hw_addr + rx_ring->rdt);
2134}
2135
2136/**
2137 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2138 * @adapter: board private structure
2139 **/
2140static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2141{
2142	int i;
2143
2144	for (i = 0; i < adapter->num_rx_queues; i++)
2145		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146}
2147
2148/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2149 * and memory write and invalidate disabled for certain operations
2150 */
2151static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2152{
2153	struct e1000_hw *hw = &adapter->hw;
2154	struct net_device *netdev = adapter->netdev;
2155	u32 rctl;
2156
2157	e1000_pci_clear_mwi(hw);
2158
2159	rctl = er32(RCTL);
2160	rctl |= E1000_RCTL_RST;
2161	ew32(RCTL, rctl);
2162	E1000_WRITE_FLUSH();
2163	mdelay(5);
2164
2165	if (netif_running(netdev))
2166		e1000_clean_all_rx_rings(adapter);
2167}
2168
2169static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2170{
2171	struct e1000_hw *hw = &adapter->hw;
2172	struct net_device *netdev = adapter->netdev;
2173	u32 rctl;
2174
2175	rctl = er32(RCTL);
2176	rctl &= ~E1000_RCTL_RST;
2177	ew32(RCTL, rctl);
2178	E1000_WRITE_FLUSH();
2179	mdelay(5);
2180
2181	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2182		e1000_pci_set_mwi(hw);
2183
2184	if (netif_running(netdev)) {
2185		/* No need to loop, because 82542 supports only 1 queue */
2186		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2187		e1000_configure_rx(adapter);
2188		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2189	}
2190}
2191
2192/**
2193 * e1000_set_mac - Change the Ethernet Address of the NIC
2194 * @netdev: network interface device structure
2195 * @p: pointer to an address structure
2196 *
2197 * Returns 0 on success, negative on failure
2198 **/
2199static int e1000_set_mac(struct net_device *netdev, void *p)
2200{
2201	struct e1000_adapter *adapter = netdev_priv(netdev);
2202	struct e1000_hw *hw = &adapter->hw;
2203	struct sockaddr *addr = p;
2204
2205	if (!is_valid_ether_addr(addr->sa_data))
2206		return -EADDRNOTAVAIL;
2207
2208	/* 82542 2.0 needs to be in reset to write receive address registers */
2209
2210	if (hw->mac_type == e1000_82542_rev2_0)
2211		e1000_enter_82542_rst(adapter);
2212
2213	eth_hw_addr_set(netdev, addr->sa_data);
2214	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2215
2216	e1000_rar_set(hw, hw->mac_addr, 0);
2217
2218	if (hw->mac_type == e1000_82542_rev2_0)
2219		e1000_leave_82542_rst(adapter);
2220
2221	return 0;
2222}
2223
2224/**
2225 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2226 * @netdev: network interface device structure
2227 *
2228 * The set_rx_mode entry point is called whenever the unicast or multicast
2229 * address lists or the network interface flags are updated. This routine is
2230 * responsible for configuring the hardware for proper unicast, multicast,
2231 * promiscuous mode, and all-multi behavior.
2232 **/
2233static void e1000_set_rx_mode(struct net_device *netdev)
2234{
2235	struct e1000_adapter *adapter = netdev_priv(netdev);
2236	struct e1000_hw *hw = &adapter->hw;
2237	struct netdev_hw_addr *ha;
2238	bool use_uc = false;
2239	u32 rctl;
2240	u32 hash_value;
2241	int i, rar_entries = E1000_RAR_ENTRIES;
2242	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2243	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2244
2245	if (!mcarray)
2246		return;
2247
2248	/* Check for Promiscuous and All Multicast modes */
2249
2250	rctl = er32(RCTL);
2251
2252	if (netdev->flags & IFF_PROMISC) {
2253		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2254		rctl &= ~E1000_RCTL_VFE;
2255	} else {
2256		if (netdev->flags & IFF_ALLMULTI)
2257			rctl |= E1000_RCTL_MPE;
2258		else
2259			rctl &= ~E1000_RCTL_MPE;
2260		/* Enable VLAN filter if there is a VLAN */
2261		if (e1000_vlan_used(adapter))
2262			rctl |= E1000_RCTL_VFE;
2263	}
2264
2265	if (netdev_uc_count(netdev) > rar_entries - 1) {
2266		rctl |= E1000_RCTL_UPE;
2267	} else if (!(netdev->flags & IFF_PROMISC)) {
2268		rctl &= ~E1000_RCTL_UPE;
2269		use_uc = true;
2270	}
2271
2272	ew32(RCTL, rctl);
2273
2274	/* 82542 2.0 needs to be in reset to write receive address registers */
2275
2276	if (hw->mac_type == e1000_82542_rev2_0)
2277		e1000_enter_82542_rst(adapter);
2278
2279	/* load the first 14 addresses into the exact filters 1-14. Unicast
2280	 * addresses take precedence to avoid disabling unicast filtering
2281	 * when possible.
2282	 *
2283	 * RAR 0 is used for the station MAC address.
2284	 * If there are not 14 addresses, go ahead and clear the filters.
2285	 */
2286	i = 1;
2287	if (use_uc)
2288		netdev_for_each_uc_addr(ha, netdev) {
2289			if (i == rar_entries)
2290				break;
2291			e1000_rar_set(hw, ha->addr, i++);
2292		}
2293
2294	netdev_for_each_mc_addr(ha, netdev) {
2295		if (i == rar_entries) {
2296			/* load any remaining addresses into the hash table */
2297			u32 hash_reg, hash_bit, mta;
2298			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2299			hash_reg = (hash_value >> 5) & 0x7F;
2300			hash_bit = hash_value & 0x1F;
2301			mta = (1 << hash_bit);
2302			mcarray[hash_reg] |= mta;
2303		} else {
2304			e1000_rar_set(hw, ha->addr, i++);
2305		}
2306	}
2307
2308	for (; i < rar_entries; i++) {
2309		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2310		E1000_WRITE_FLUSH();
2311		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2312		E1000_WRITE_FLUSH();
2313	}
2314
2315	/* write the hash table completely; write from the bottom to avoid
2316	 * both stupid write-combining chipsets and flushing each write
2317	 */
2318	for (i = mta_reg_count - 1; i >= 0; i--) {
2319		/* The 82544 has an errata where writing odd
2320		 * offsets overwrites the previous even offset, but writing
2321		 * backwards over the range solves the issue by always
2322		 * writing the odd offset first
2323		 */
2324		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2325	}
2326	E1000_WRITE_FLUSH();
2327
2328	if (hw->mac_type == e1000_82542_rev2_0)
2329		e1000_leave_82542_rst(adapter);
2330
2331	kfree(mcarray);
2332}
2333
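/* Sketch of the multicast hash indexing used above (illustrative only,
 * not driver code): bits [11:5] of hash_value select one of the
 * E1000_NUM_MTA_REGISTERS 32-bit words, bits [4:0] select the bit.
 */
static inline void e1000_example_mta_set(u32 *mta_table, u32 hash_value)
{
	u32 reg = (hash_value >> 5) & 0x7F;	/* which MTA word */
	u32 bit = hash_value & 0x1F;		/* which bit within it */

	mta_table[reg] |= BIT(bit);
}
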
2334/**
2335 * e1000_update_phy_info_task - get phy info
2336 * @work: work struct contained inside adapter struct
2337 *
2338 * Need to wait a few seconds after link up to get diagnostic information from
2339 * the phy
2340 */
2341static void e1000_update_phy_info_task(struct work_struct *work)
2342{
2343	struct e1000_adapter *adapter = container_of(work,
2344						     struct e1000_adapter,
2345						     phy_info_task.work);
2346
2347	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2348}
2349
2350/**
2351 * e1000_82547_tx_fifo_stall_task - task to complete work
2352 * @work: work struct contained inside adapter struct
2353 **/
2354static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2355{
2356	struct e1000_adapter *adapter = container_of(work,
2357						     struct e1000_adapter,
2358						     fifo_stall_task.work);
2359	struct e1000_hw *hw = &adapter->hw;
2360	struct net_device *netdev = adapter->netdev;
2361	u32 tctl;
2362
2363	if (atomic_read(&adapter->tx_fifo_stall)) {
2364		if ((er32(TDT) == er32(TDH)) &&
2365		   (er32(TDFT) == er32(TDFH)) &&
2366		   (er32(TDFTS) == er32(TDFHS))) {
2367			tctl = er32(TCTL);
2368			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2369			ew32(TDFT, adapter->tx_head_addr);
2370			ew32(TDFH, adapter->tx_head_addr);
2371			ew32(TDFTS, adapter->tx_head_addr);
2372			ew32(TDFHS, adapter->tx_head_addr);
2373			ew32(TCTL, tctl);
2374			E1000_WRITE_FLUSH();
2375
2376			adapter->tx_fifo_head = 0;
2377			atomic_set(&adapter->tx_fifo_stall, 0);
2378			netif_wake_queue(netdev);
2379		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2380			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2381		}
2382	}
2383}
2384
2385bool e1000_has_link(struct e1000_adapter *adapter)
2386{
2387	struct e1000_hw *hw = &adapter->hw;
2388	bool link_active = false;
2389
2390	/* get_link_status is set on LSC (link status) interrupt or rx
2391	 * sequence error interrupt (except on intel ce4100).
2392	 * get_link_status will stay true until
2393	 * e1000_check_for_link establishes link for copper adapters
2394	 * ONLY
2395	 */
2396	switch (hw->media_type) {
2397	case e1000_media_type_copper:
2398		if (hw->mac_type == e1000_ce4100)
2399			hw->get_link_status = 1;
2400		if (hw->get_link_status) {
2401			e1000_check_for_link(hw);
2402			link_active = !hw->get_link_status;
2403		} else {
2404			link_active = true;
2405		}
2406		break;
2407	case e1000_media_type_fiber:
2408		e1000_check_for_link(hw);
2409		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410		break;
2411	case e1000_media_type_internal_serdes:
2412		e1000_check_for_link(hw);
2413		link_active = hw->serdes_has_link;
2414		break;
2415	default:
2416		break;
2417	}
2418
2419	return link_active;
2420}
2421
2422/**
2423 * e1000_watchdog - work function
2424 * @work: work struct contained inside adapter struct
2425 **/
2426static void e1000_watchdog(struct work_struct *work)
2427{
2428	struct e1000_adapter *adapter = container_of(work,
2429						     struct e1000_adapter,
2430						     watchdog_task.work);
2431	struct e1000_hw *hw = &adapter->hw;
2432	struct net_device *netdev = adapter->netdev;
2433	struct e1000_tx_ring *txdr = adapter->tx_ring;
2434	u32 link, tctl;
2435
2436	link = e1000_has_link(adapter);
2437	if ((netif_carrier_ok(netdev)) && link)
2438		goto link_up;
2439
2440	if (link) {
2441		if (!netif_carrier_ok(netdev)) {
2442			u32 ctrl;
 
2443			/* update snapshot of PHY registers on LSC */
2444			e1000_get_speed_and_duplex(hw,
2445						   &adapter->link_speed,
2446						   &adapter->link_duplex);
2447
2448			ctrl = er32(CTRL);
2449			pr_info("%s NIC Link is Up %d Mbps %s, "
2450				"Flow Control: %s\n",
2451				netdev->name,
2452				adapter->link_speed,
2453				adapter->link_duplex == FULL_DUPLEX ?
2454				"Full Duplex" : "Half Duplex",
2455				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2456				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2457				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2458				E1000_CTRL_TFCE) ? "TX" : "None")));
2459
2460			/* adjust timeout factor according to speed/duplex */
2461			adapter->tx_timeout_factor = 1;
2462			switch (adapter->link_speed) {
2463			case SPEED_10:
2464				adapter->tx_timeout_factor = 16;
2465				break;
2466			case SPEED_100:
2467				/* maybe add some timeout factor? */
2468				break;
2469			}
2470
2471			/* enable transmits in the hardware */
2472			tctl = er32(TCTL);
2473			tctl |= E1000_TCTL_EN;
2474			ew32(TCTL, tctl);
2475
2476			netif_carrier_on(netdev);
2477			if (!test_bit(__E1000_DOWN, &adapter->flags))
2478				schedule_delayed_work(&adapter->phy_info_task,
2479						      2 * HZ);
2480			adapter->smartspeed = 0;
2481		}
2482	} else {
2483		if (netif_carrier_ok(netdev)) {
2484			adapter->link_speed = 0;
2485			adapter->link_duplex = 0;
2486			pr_info("%s NIC Link is Down\n",
2487				netdev->name);
2488			netif_carrier_off(netdev);
2489
2490			if (!test_bit(__E1000_DOWN, &adapter->flags))
2491				schedule_delayed_work(&adapter->phy_info_task,
2492						      2 * HZ);
2493		}
2494
2495		e1000_smartspeed(adapter);
2496	}
2497
2498link_up:
2499	e1000_update_stats(adapter);
2500
2501	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2502	adapter->tpt_old = adapter->stats.tpt;
2503	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2504	adapter->colc_old = adapter->stats.colc;
2505
2506	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2507	adapter->gorcl_old = adapter->stats.gorcl;
2508	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2509	adapter->gotcl_old = adapter->stats.gotcl;
2510
2511	e1000_update_adaptive(hw);
2512
2513	if (!netif_carrier_ok(netdev)) {
2514		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2515			/* We've lost link, so the controller stops DMA,
2516			 * but we've got queued Tx work that's never going
2517			 * to get done, so reset controller to flush Tx.
2518			 * (Do the reset outside of interrupt context).
2519			 */
2520			adapter->tx_timeout_count++;
2521			schedule_work(&adapter->reset_task);
2522			/* exit immediately since reset is imminent */
2523			return;
2524		}
2525	}
2526
2527	/* Simple mode for Interrupt Throttle Rate (ITR) */
2528	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2529		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2530		 * Total asymmetrical Tx or Rx gets ITR=8000;
2531		 * everyone else is between 2000-8000.
2532		 */
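		/* Worked example: perfectly symmetric traffic gives
		 * dif = 0 and hence itr = 2000; fully one-sided traffic
		 * gives dif == goc and hence itr = 6000 + 2000 = 8000.
		 */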
2533		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2534		u32 dif = (adapter->gotcl > adapter->gorcl ?
2535			    adapter->gotcl - adapter->gorcl :
2536			    adapter->gorcl - adapter->gotcl) / 10000;
2537		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2538
2539		ew32(ITR, 1000000000 / (itr * 256));
2540	}
2541
2542	/* Cause software interrupt to ensure rx ring is cleaned */
2543	ew32(ICS, E1000_ICS_RXDMT0);
2544
2545	/* Force detection of hung controller every watchdog period */
2546	adapter->detect_tx_hung = true;
2547
2548	/* Reschedule the task */
2549	if (!test_bit(__E1000_DOWN, &adapter->flags))
2550		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2551}
2552
2553enum latency_range {
2554	lowest_latency = 0,
2555	low_latency = 1,
2556	bulk_latency = 2,
2557	latency_invalid = 255
2558};
2559
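/* Worked example for e1000_update_itr() below: an interval of 3 packets
 * totalling 4500 bytes while in lowest_latency satisfies
 * (packets < 5 && bytes > 512) and moves the ring to low_latency,
 * whereas two 9000-byte jumbo frames (bytes/packets > 8000) would be
 * classed as bulk_latency.
 */
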
2560/**
2561 * e1000_update_itr - update the dynamic ITR value based on statistics
2562 * @adapter: pointer to adapter
2563 * @itr_setting: current adapter->itr
2564 * @packets: the number of packets during this measurement interval
2565 * @bytes: the number of bytes during this measurement interval
2566 *
2567 *      Stores a new ITR value based on packets and byte
2568 *      counts during the last interrupt.  The advantage of per interrupt
2569 *      computation is faster updates and more accurate ITR for the current
2570 *      traffic pattern.  Constants in this function were computed
2571 *      based on theoretical maximum wire speed and thresholds were set based
2572 *      on testing data as well as attempting to minimize response time
2573 *      while increasing bulk throughput.
2574 *      This functionality is controlled by the InterruptThrottleRate module
2575 *      parameter (see e1000_param.c)
2576 **/
2577static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2578				     u16 itr_setting, int packets, int bytes)
2579{
2580	unsigned int retval = itr_setting;
2581	struct e1000_hw *hw = &adapter->hw;
2582
2583	if (unlikely(hw->mac_type < e1000_82540))
2584		goto update_itr_done;
2585
2586	if (packets == 0)
2587		goto update_itr_done;
2588
2589	switch (itr_setting) {
2590	case lowest_latency:
2591		/* jumbo frames get bulk treatment */
2592		if (bytes/packets > 8000)
2593			retval = bulk_latency;
2594		else if ((packets < 5) && (bytes > 512))
2595			retval = low_latency;
2596		break;
2597	case low_latency:  /* 50 usec aka 20000 ints/s */
2598		if (bytes > 10000) {
2599			/* jumbo frames need bulk latency setting */
2600			if (bytes/packets > 8000)
2601				retval = bulk_latency;
2602			else if ((packets < 10) || ((bytes/packets) > 1200))
2603				retval = bulk_latency;
2604			else if (packets > 35)
2605				retval = lowest_latency;
2606		} else if (bytes/packets > 2000)
2607			retval = bulk_latency;
2608		else if (packets <= 2 && bytes < 512)
2609			retval = lowest_latency;
2610		break;
2611	case bulk_latency: /* 250 usec aka 4000 ints/s */
2612		if (bytes > 25000) {
2613			if (packets > 35)
2614				retval = low_latency;
2615		} else if (bytes < 6000) {
2616			retval = low_latency;
2617		}
2618		break;
2619	}
2620
2621update_itr_done:
2622	return retval;
2623}
2624
2625static void e1000_set_itr(struct e1000_adapter *adapter)
2626{
2627	struct e1000_hw *hw = &adapter->hw;
2628	u16 current_itr;
2629	u32 new_itr = adapter->itr;
2630
2631	if (unlikely(hw->mac_type < e1000_82540))
2632		return;
2633
2634	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2635	if (unlikely(adapter->link_speed != SPEED_1000)) {
2636		new_itr = 4000;
2637		goto set_itr_now;
2638	}
2639
2640	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641					   adapter->total_tx_packets,
2642					   adapter->total_tx_bytes);
2643	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2644	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645		adapter->tx_itr = low_latency;
2646
2647	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648					   adapter->total_rx_packets,
2649					   adapter->total_rx_bytes);
2650	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2651	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652		adapter->rx_itr = low_latency;
2653
2654	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655
2656	switch (current_itr) {
2657	/* counts and packets in update_itr are dependent on these numbers */
2658	case lowest_latency:
2659		new_itr = 70000;
2660		break;
2661	case low_latency:
2662		new_itr = 20000; /* aka hwitr = ~200 */
2663		break;
2664	case bulk_latency:
2665		new_itr = 4000;
2666		break;
2667	default:
2668		break;
2669	}
2670
2671set_itr_now:
2672	if (new_itr != adapter->itr) {
2673		/* this attempts to bias the interrupt rate towards Bulk
2674		 * by adding intermediate steps when interrupt rate is
2675		 * increasing
2676		 */
2677		new_itr = new_itr > adapter->itr ?
2678			  min(adapter->itr + (new_itr >> 2), new_itr) :
2679			  new_itr;
2680		adapter->itr = new_itr;
2681		ew32(ITR, 1000000000 / (new_itr * 256));
2682	}
2683}
2684
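/* Illustrative sketch, not part of the driver: the ITR writes above use
 * 1000000000 / (rate * 256), converting an interrupts/sec target into
 * the register's tick unit (256 ns per tick, assuming the usual 8254x
 * ITR granularity); e.g. 4000 ints/s -> ~976 ticks.
 */
static inline u32 e1000_example_itr_reg(u32 ints_per_sec)
{
	return 1000000000 / (ints_per_sec * 256);
}
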
2685#define E1000_TX_FLAGS_CSUM		0x00000001
2686#define E1000_TX_FLAGS_VLAN		0x00000002
2687#define E1000_TX_FLAGS_TSO		0x00000004
2688#define E1000_TX_FLAGS_IPV4		0x00000008
2689#define E1000_TX_FLAGS_NO_FCS		0x00000010
2690#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2691#define E1000_TX_FLAGS_VLAN_SHIFT	16
2692
2693static int e1000_tso(struct e1000_adapter *adapter,
2694		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2695		     __be16 protocol)
2696{
2697	struct e1000_context_desc *context_desc;
2698	struct e1000_tx_buffer *buffer_info;
2699	unsigned int i;
2700	u32 cmd_length = 0;
2701	u16 ipcse = 0, tucse, mss;
2702	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2703
2704	if (skb_is_gso(skb)) {
2705		int err;
2706
2707		err = skb_cow_head(skb, 0);
2708		if (err < 0)
2709			return err;
2710
2711		hdr_len = skb_tcp_all_headers(skb);
2712		mss = skb_shinfo(skb)->gso_size;
2713		if (protocol == htons(ETH_P_IP)) {
2714			struct iphdr *iph = ip_hdr(skb);
2715			iph->tot_len = 0;
2716			iph->check = 0;
2717			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2718								 iph->daddr, 0,
2719								 IPPROTO_TCP,
2720								 0);
2721			cmd_length = E1000_TXD_CMD_IP;
2722			ipcse = skb_transport_offset(skb) - 1;
2723		} else if (skb_is_gso_v6(skb)) {
2724			tcp_v6_gso_csum_prep(skb);
2725			ipcse = 0;
2726		}
2727		ipcss = skb_network_offset(skb);
2728		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2729		tucss = skb_transport_offset(skb);
2730		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2731		tucse = 0;
2732
2733		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2734			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2735
2736		i = tx_ring->next_to_use;
2737		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2738		buffer_info = &tx_ring->buffer_info[i];
2739
2740		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2741		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2742		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2743		context_desc->upper_setup.tcp_fields.tucss = tucss;
2744		context_desc->upper_setup.tcp_fields.tucso = tucso;
2745		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2746		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2747		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2748		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2749
2750		buffer_info->time_stamp = jiffies;
2751		buffer_info->next_to_watch = i;
2752
2753		if (++i == tx_ring->count)
2754			i = 0;
2755
2756		tx_ring->next_to_use = i;
2757
2758		return true;
2759	}
2760	return false;
2761}
2762
2763static bool e1000_tx_csum(struct e1000_adapter *adapter,
2764			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2765			  __be16 protocol)
2766{
2767	struct e1000_context_desc *context_desc;
2768	struct e1000_tx_buffer *buffer_info;
2769	unsigned int i;
2770	u8 css;
2771	u32 cmd_len = E1000_TXD_CMD_DEXT;
2772
2773	if (skb->ip_summed != CHECKSUM_PARTIAL)
2774		return false;
2775
2776	switch (protocol) {
2777	case cpu_to_be16(ETH_P_IP):
2778		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2779			cmd_len |= E1000_TXD_CMD_TCP;
2780		break;
2781	case cpu_to_be16(ETH_P_IPV6):
2782		/* XXX not handling all IPV6 headers */
2783		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2784			cmd_len |= E1000_TXD_CMD_TCP;
2785		break;
2786	default:
2787		if (unlikely(net_ratelimit()))
2788			e_warn(drv, "checksum_partial proto=%x!\n",
2789			       skb->protocol);
2790		break;
2791	}
2792
2793	css = skb_checksum_start_offset(skb);
2794
2795	i = tx_ring->next_to_use;
2796	buffer_info = &tx_ring->buffer_info[i];
2797	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2798
2799	context_desc->lower_setup.ip_config = 0;
2800	context_desc->upper_setup.tcp_fields.tucss = css;
2801	context_desc->upper_setup.tcp_fields.tucso =
2802		css + skb->csum_offset;
2803	context_desc->upper_setup.tcp_fields.tucse = 0;
2804	context_desc->tcp_seg_setup.data = 0;
2805	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2806
2807	buffer_info->time_stamp = jiffies;
2808	buffer_info->next_to_watch = i;
2809
2810	if (unlikely(++i == tx_ring->count))
2811		i = 0;
2812
2813	tx_ring->next_to_use = i;
2814
2815	return true;
2816}
2817
2818#define E1000_MAX_TXD_PWR	12
2819#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2820
2821static int e1000_tx_map(struct e1000_adapter *adapter,
2822			struct e1000_tx_ring *tx_ring,
2823			struct sk_buff *skb, unsigned int first,
2824			unsigned int max_per_txd, unsigned int nr_frags,
2825			unsigned int mss)
2826{
2827	struct e1000_hw *hw = &adapter->hw;
2828	struct pci_dev *pdev = adapter->pdev;
2829	struct e1000_tx_buffer *buffer_info;
2830	unsigned int len = skb_headlen(skb);
2831	unsigned int offset = 0, size, count = 0, i;
2832	unsigned int f, bytecount, segs;
2833
2834	i = tx_ring->next_to_use;
2835
2836	while (len) {
2837		buffer_info = &tx_ring->buffer_info[i];
2838		size = min(len, max_per_txd);
2839		/* Workaround for Controller erratum --
2840		 * descriptor for non-tso packet in a linear SKB that follows a
2841		 * tso gets written back prematurely before the data is fully
2842		 * DMA'd to the controller
2843		 */
2844		if (!skb->data_len && tx_ring->last_tx_tso &&
2845		    !skb_is_gso(skb)) {
2846			tx_ring->last_tx_tso = false;
2847			size -= 4;
2848		}
2849
2850		/* Workaround for premature desc write-backs
2851		 * in TSO mode.  Append 4-byte sentinel desc
2852		 */
2853		if (unlikely(mss && !nr_frags && size == len && size > 8))
2854			size -= 4;
2855		/* work-around for errata 10; it applies
2856		 * to all controllers in PCI-X mode.
2857		 * The fix is to make sure that the first descriptor of a
2858		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2859		 */
2860		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861			     (size > 2015) && count == 0))
2862			size = 2015;
2863
2864		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2865		 * terminating buffers within evenly-aligned dwords.
2866		 */
2867		if (unlikely(adapter->pcix_82544 &&
2868		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869		   size > 4))
2870			size -= 4;
2871
2872		buffer_info->length = size;
2873		/* set time_stamp *before* dma to help avoid a possible race */
2874		buffer_info->time_stamp = jiffies;
2875		buffer_info->mapped_as_page = false;
2876		buffer_info->dma = dma_map_single(&pdev->dev,
2877						  skb->data + offset,
2878						  size, DMA_TO_DEVICE);
2879		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880			goto dma_error;
2881		buffer_info->next_to_watch = i;
2882
2883		len -= size;
2884		offset += size;
2885		count++;
2886		if (len) {
2887			i++;
2888			if (unlikely(i == tx_ring->count))
2889				i = 0;
2890		}
2891	}
2892
2893	for (f = 0; f < nr_frags; f++) {
2894		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895
 
2896		len = skb_frag_size(frag);
2897		offset = 0;
2898
2899		while (len) {
2900			unsigned long bufend;
2901			i++;
2902			if (unlikely(i == tx_ring->count))
2903				i = 0;
2904
2905			buffer_info = &tx_ring->buffer_info[i];
2906			size = min(len, max_per_txd);
2907			/* Workaround for premature desc write-backs
2908			 * in TSO mode.  Append 4-byte sentinel desc
2909			 */
2910			if (unlikely(mss && f == (nr_frags-1) &&
2911			    size == len && size > 8))
2912				size -= 4;
2913			/* Workaround for potential 82544 hang in PCI-X.
2914			 * Avoid terminating buffers within evenly-aligned
2915			 * dwords.
2916			 */
2917			bufend = (unsigned long)
2918				page_to_phys(skb_frag_page(frag));
2919			bufend += offset + size - 1;
2920			if (unlikely(adapter->pcix_82544 &&
2921				     !(bufend & 4) &&
2922				     size > 4))
2923				size -= 4;
2924
2925			buffer_info->length = size;
2926			buffer_info->time_stamp = jiffies;
2927			buffer_info->mapped_as_page = true;
2928			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929						offset, size, DMA_TO_DEVICE);
2930			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931				goto dma_error;
2932			buffer_info->next_to_watch = i;
2933
2934			len -= size;
2935			offset += size;
2936			count++;
2937		}
2938	}
2939
2940	segs = skb_shinfo(skb)->gso_segs ?: 1;
2941	/* multiply data chunks by size of headers */
2942	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2943
2944	tx_ring->buffer_info[i].skb = skb;
2945	tx_ring->buffer_info[i].segs = segs;
2946	tx_ring->buffer_info[i].bytecount = bytecount;
2947	tx_ring->buffer_info[first].next_to_watch = i;
2948
2949	return count;
2950
2951dma_error:
2952	dev_err(&pdev->dev, "TX DMA map failed\n");
2953	buffer_info->dma = 0;
2954	if (count)
2955		count--;
2956
2957	while (count--) {
2958		if (i == 0)
2959			i += tx_ring->count;
2960		i--;
2961		buffer_info = &tx_ring->buffer_info[i];
2962		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963	}
2964
2965	return 0;
2966}
2967
2968static void e1000_tx_queue(struct e1000_adapter *adapter,
2969			   struct e1000_tx_ring *tx_ring, int tx_flags,
2970			   int count)
2971{
2972	struct e1000_tx_desc *tx_desc = NULL;
2973	struct e1000_tx_buffer *buffer_info;
2974	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975	unsigned int i;
2976
2977	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979			     E1000_TXD_CMD_TSE;
2980		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981
2982		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984	}
2985
2986	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989	}
2990
2991	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992		txd_lower |= E1000_TXD_CMD_VLE;
2993		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994	}
2995
2996	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2997		txd_lower &= ~(E1000_TXD_CMD_IFCS);
2998
2999	i = tx_ring->next_to_use;
3000
3001	while (count--) {
3002		buffer_info = &tx_ring->buffer_info[i];
3003		tx_desc = E1000_TX_DESC(*tx_ring, i);
3004		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005		tx_desc->lower.data =
3006			cpu_to_le32(txd_lower | buffer_info->length);
3007		tx_desc->upper.data = cpu_to_le32(txd_upper);
3008		if (unlikely(++i == tx_ring->count))
3009			i = 0;
3010	}
3011
3012	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013
3014	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3015	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3016		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3017
3018	/* Force memory writes to complete before letting h/w
3019	 * know there are new descriptors to fetch.  (Only
3020	 * applicable for weak-ordered memory model archs,
3021	 * such as IA-64).
3022	 */
3023	dma_wmb();
3024
3025	tx_ring->next_to_use = i;
3026}
3027
3028/* 82547 workaround to avoid controller hang in half-duplex environment.
3029 * The workaround is to avoid queuing a large packet that would span
3030 * the internal Tx FIFO ring boundary by notifying the stack to resend
3031 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3032 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3033 * to the beginning of the Tx FIFO.
3034 */
3035
3036#define E1000_FIFO_HDR			0x10
3037#define E1000_82547_PAD_LEN		0x3E0
3038
3039static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040				       struct sk_buff *skb)
3041{
3042	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044
3045	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046
3047	if (adapter->link_duplex != HALF_DUPLEX)
3048		goto no_fifo_stall_required;
3049
3050	if (atomic_read(&adapter->tx_fifo_stall))
3051		return 1;
3052
3053	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054		atomic_set(&adapter->tx_fifo_stall, 1);
3055		return 1;
3056	}
3057
3058no_fifo_stall_required:
3059	adapter->tx_fifo_head += skb_fifo_len;
3060	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062	return 0;
3063}
3064
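/* Worked example of the wraparound above (values illustrative): with
 * tx_fifo_size = 0x1000, tx_fifo_head = 0x0ff0 and skb_fifo_len = 0x40,
 * the head advances to 0x1030 and is folded back to 0x0030; the FIFO
 * head is tracked modulo the FIFO size.
 */
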
3065static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066{
3067	struct e1000_adapter *adapter = netdev_priv(netdev);
3068	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069
3070	netif_stop_queue(netdev);
3071	/* Herbert's original patch had:
3072	 *  smp_mb__after_netif_stop_queue();
3073	 * but since that doesn't exist yet, just open code it.
3074	 */
3075	smp_mb();
3076
3077	/* We need to check again in case another CPU has just
3078	 * made room available.
3079	 */
3080	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081		return -EBUSY;
3082
3083	/* A reprieve! */
3084	netif_start_queue(netdev);
3085	++adapter->restart_queue;
3086	return 0;
3087}
3088
3089static int e1000_maybe_stop_tx(struct net_device *netdev,
3090			       struct e1000_tx_ring *tx_ring, int size)
3091{
3092	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3093		return 0;
3094	return __e1000_maybe_stop_tx(netdev, size);
3095}
3096
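/* Worked example for the macro below: TXD_USE_COUNT(S, X) computes
 * ceil(S / 2^X), so with max_txd_pwr = E1000_MAX_TXD_PWR = 12 a
 * 9000-byte buffer costs (9000 + 4095) >> 12 = 3 descriptors.
 */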
3097#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3098static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3099				    struct net_device *netdev)
3100{
3101	struct e1000_adapter *adapter = netdev_priv(netdev);
3102	struct e1000_hw *hw = &adapter->hw;
3103	struct e1000_tx_ring *tx_ring;
3104	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3105	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3106	unsigned int tx_flags = 0;
3107	unsigned int len = skb_headlen(skb);
3108	unsigned int nr_frags;
3109	unsigned int mss;
3110	int count = 0;
3111	int tso;
3112	unsigned int f;
3113	__be16 protocol = vlan_get_protocol(skb);
3114
3115	/* This goes back to the question of how to logically map a Tx queue
3116	 * to a flow.  Right now, performance is impacted slightly negatively
3117	 * if using multiple Tx queues.  If the stack breaks away from a
3118	 * single qdisc implementation, we can look at this again.
3119	 */
3120	tx_ring = adapter->tx_ring;
3121
3122	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3123	 * packets may get corrupted during padding by HW.
3124	 * To work around this issue, pad all small packets manually.
3125	 */
3126	if (eth_skb_pad(skb))
3127		return NETDEV_TX_OK;
3128
3129	mss = skb_shinfo(skb)->gso_size;
3130	/* The controller does a simple calculation to
3131	 * make sure there is enough room in the FIFO before
3132	 * initiating the DMA for each buffer.  The calc is:
3133	 * ceil(buffer len/mss) <= 4.  To make sure we don't
3134	 * overrun the FIFO, adjust the max buffer len if mss
3135	 * drops.
3136	 */
3137	if (mss) {
3138		u8 hdr_len;
3139		max_per_txd = min(mss << 2, max_per_txd);
3140		max_txd_pwr = fls(max_per_txd) - 1;
3141
3142		hdr_len = skb_tcp_all_headers(skb);
3143		if (skb->data_len && hdr_len == len) {
3144			switch (hw->mac_type) {
3145			case e1000_82544: {
3146				unsigned int pull_size;
3147
3148				/* Make sure we have room to chop off 4 bytes,
3149				 * and that the end alignment will work out to
3150				 * this hardware's requirements.
3151				 * NOTE: this is a TSO-only workaround; if the
3152				 * end byte alignment is not correct, move us
3153				 * into the next dword
3154				 */
3155				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3156				    & 4)
3157					break;
3158				pull_size = min((unsigned int)4, skb->data_len);
3159				if (!__pskb_pull_tail(skb, pull_size)) {
3160					e_err(drv, "__pskb_pull_tail "
3161					      "failed.\n");
3162					dev_kfree_skb_any(skb);
3163					return NETDEV_TX_OK;
3164				}
3165				len = skb_headlen(skb);
3166				break;
3167			}
3168			default:
3169				/* do nothing */
3170				break;
3171			}
3172		}
3173	}
3174
3175	/* reserve a descriptor for the offload context */
3176	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177		count++;
3178	count++;
3179
3180	/* Controller Erratum workaround */
3181	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182		count++;
3183
3184	count += TXD_USE_COUNT(len, max_txd_pwr);
3185
3186	if (adapter->pcix_82544)
3187		count++;
3188
3189	/* work-around for errata 10; it applies to all controllers
3190	 * in PCI-X mode, so add one more descriptor to the count
3191	 */
3192	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193			(len > 2015)))
3194		count++;
3195
3196	nr_frags = skb_shinfo(skb)->nr_frags;
3197	for (f = 0; f < nr_frags; f++)
3198		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199				       max_txd_pwr);
3200	if (adapter->pcix_82544)
3201		count += nr_frags;
3202
3203	/* need: count + 2 desc gap to keep tail from touching
3204	 * head, otherwise try next time
3205	 */
3206	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3207		return NETDEV_TX_BUSY;
3208
3209	if (unlikely((hw->mac_type == e1000_82547) &&
3210		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3211		netif_stop_queue(netdev);
3212		if (!test_bit(__E1000_DOWN, &adapter->flags))
3213			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214		return NETDEV_TX_BUSY;
3215	}
3216
3217	if (skb_vlan_tag_present(skb)) {
3218		tx_flags |= E1000_TX_FLAGS_VLAN;
3219		tx_flags |= (skb_vlan_tag_get(skb) <<
3220			     E1000_TX_FLAGS_VLAN_SHIFT);
3221	}
3222
3223	first = tx_ring->next_to_use;
3224
3225	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3226	if (tso < 0) {
3227		dev_kfree_skb_any(skb);
3228		return NETDEV_TX_OK;
3229	}
3230
3231	if (likely(tso)) {
3232		if (likely(hw->mac_type != e1000_82544))
3233			tx_ring->last_tx_tso = true;
3234		tx_flags |= E1000_TX_FLAGS_TSO;
3235	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3236		tx_flags |= E1000_TX_FLAGS_CSUM;
3237
3238	if (protocol == htons(ETH_P_IP))
3239		tx_flags |= E1000_TX_FLAGS_IPV4;
3240
3241	if (unlikely(skb->no_fcs))
3242		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3243
3244	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3245			     nr_frags, mss);
3246
3247	if (count) {
3248		/* The descriptors needed is higher than other Intel drivers
3249		 * due to a number of workarounds.  The breakdown is below:
3250		 * Data descriptors: MAX_SKB_FRAGS + 1
3251		 * Context Descriptor: 1
3252		 * Keep head from touching tail: 2
3253		 * Workarounds: 3
3254		 */
3255		int desc_needed = MAX_SKB_FRAGS + 7;
3256
3257		netdev_sent_queue(netdev, skb->len);
3258		skb_tx_timestamp(skb);
3259
3260		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3261
3262		/* 82544 potentially requires twice as many data descriptors
3263		 * in order to guarantee buffers don't end on evenly-aligned
3264		 * dwords
3265		 */
3266		if (adapter->pcix_82544)
3267			desc_needed += MAX_SKB_FRAGS + 1;
3268
3269		/* Make sure there is space in the ring for the next send. */
3270		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271
3272		if (!netdev_xmit_more() ||
3273		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275		}
3276	} else {
3277		dev_kfree_skb_any(skb);
3278		tx_ring->buffer_info[first].time_stamp = 0;
3279		tx_ring->next_to_use = first;
3280	}
3281
3282	return NETDEV_TX_OK;
3283}
3284
3285#define NUM_REGS 38 /* 1 based count */
3286static void e1000_regdump(struct e1000_adapter *adapter)
3287{
3288	struct e1000_hw *hw = &adapter->hw;
3289	u32 regs[NUM_REGS];
3290	u32 *regs_buff = regs;
3291	int i = 0;
3292
3293	static const char * const reg_name[] = {
3294		"CTRL",  "STATUS",
3295		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297		"TIDV", "TXDCTL", "TADV", "TARC0",
3298		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299		"TXDCTL1", "TARC1",
3300		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303	};
3304
3305	regs_buff[0]  = er32(CTRL);
3306	regs_buff[1]  = er32(STATUS);
3307
3308	regs_buff[2]  = er32(RCTL);
3309	regs_buff[3]  = er32(RDLEN);
3310	regs_buff[4]  = er32(RDH);
3311	regs_buff[5]  = er32(RDT);
3312	regs_buff[6]  = er32(RDTR);
3313
3314	regs_buff[7]  = er32(TCTL);
3315	regs_buff[8]  = er32(TDBAL);
3316	regs_buff[9]  = er32(TDBAH);
3317	regs_buff[10] = er32(TDLEN);
3318	regs_buff[11] = er32(TDH);
3319	regs_buff[12] = er32(TDT);
3320	regs_buff[13] = er32(TIDV);
3321	regs_buff[14] = er32(TXDCTL);
3322	regs_buff[15] = er32(TADV);
3323	regs_buff[16] = er32(TARC0);
3324
3325	regs_buff[17] = er32(TDBAL1);
3326	regs_buff[18] = er32(TDBAH1);
3327	regs_buff[19] = er32(TDLEN1);
3328	regs_buff[20] = er32(TDH1);
3329	regs_buff[21] = er32(TDT1);
3330	regs_buff[22] = er32(TXDCTL1);
3331	regs_buff[23] = er32(TARC1);
3332	regs_buff[24] = er32(CTRL_EXT);
3333	regs_buff[25] = er32(ERT);
3334	regs_buff[26] = er32(RDBAL0);
3335	regs_buff[27] = er32(RDBAH0);
3336	regs_buff[28] = er32(TDFH);
3337	regs_buff[29] = er32(TDFT);
3338	regs_buff[30] = er32(TDFHS);
3339	regs_buff[31] = er32(TDFTS);
3340	regs_buff[32] = er32(TDFPC);
3341	regs_buff[33] = er32(RDFH);
3342	regs_buff[34] = er32(RDFT);
3343	regs_buff[35] = er32(RDFHS);
3344	regs_buff[36] = er32(RDFTS);
3345	regs_buff[37] = er32(RDFPC);
3346
3347	pr_info("Register dump\n");
3348	for (i = 0; i < NUM_REGS; i++)
3349		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3350}
3351
3352/**
3353 * e1000_dump - Print registers, tx ring and rx ring
 * @adapter: board private structure
3354 */
3355static void e1000_dump(struct e1000_adapter *adapter)
3356{
3357	/* this code doesn't handle multiple rings */
3358	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3359	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3360	int i;
3361
3362	if (!netif_msg_hw(adapter))
3363		return;
3364
3365	/* Print Registers */
3366	e1000_regdump(adapter);
3367
3368	/* transmit dump */
3369	pr_info("TX Desc ring0 dump\n");
3370
3371	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372	 *
3373	 * Legacy Transmit Descriptor
3374	 *   +--------------------------------------------------------------+
3375	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3376	 *   +--------------------------------------------------------------+
3377	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3378	 *   +--------------------------------------------------------------+
3379	 *   63       48 47        36 35    32 31     24 23    16 15        0
3380	 *
3381	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382	 *   63      48 47    40 39       32 31             16 15    8 7      0
3383	 *   +----------------------------------------------------------------+
3384	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3385	 *   +----------------------------------------------------------------+
3386	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3387	 *   +----------------------------------------------------------------+
3388	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3389	 *
3390	 * Extended Data Descriptor (DTYP=0x1)
3391	 *   +----------------------------------------------------------------+
3392	 * 0 |                     Buffer Address [63:0]                      |
3393	 *   +----------------------------------------------------------------+
3394	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3395	 *   +----------------------------------------------------------------+
3396	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3397	 */
3398	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3400
3401	if (!netif_msg_tx_done(adapter))
3402		goto rx_ring_summary;
3403
3404	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3407		struct my_u { __le64 a; __le64 b; };
3408		struct my_u *u = (struct my_u *)tx_desc;
3409		const char *type;
3410
3411		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412			type = "NTC/U";
3413		else if (i == tx_ring->next_to_use)
3414			type = "NTU";
3415		else if (i == tx_ring->next_to_clean)
3416			type = "NTC";
3417		else
3418			type = "";
3419
3420		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3421			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422			le64_to_cpu(u->a), le64_to_cpu(u->b),
3423			(u64)buffer_info->dma, buffer_info->length,
3424			buffer_info->next_to_watch,
3425			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3426	}
3427
3428rx_ring_summary:
3429	/* receive dump */
3430	pr_info("\nRX Desc ring dump\n");
3431
3432	/* Legacy Receive Descriptor Format
3433	 *
3434	 * +-----------------------------------------------------+
3435	 * |                Buffer Address [63:0]                |
3436	 * +-----------------------------------------------------+
3437	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3438	 * +-----------------------------------------------------+
3439	 * 63       48 47    40 39      32 31         16 15      0
3440	 */
3441	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3442
3443	if (!netif_msg_rx_status(adapter))
3444		goto exit;
3445
3446	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3447		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3448		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3449		struct my_u { __le64 a; __le64 b; };
3450		struct my_u *u = (struct my_u *)rx_desc;
3451		const char *type;
3452
3453		if (i == rx_ring->next_to_use)
3454			type = "NTU";
3455		else if (i == rx_ring->next_to_clean)
3456			type = "NTC";
3457		else
3458			type = "";
3459
3460		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3461			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3462			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3463	} /* for */
3464
3465	/* dump the descriptor caches */
3466	/* rx */
3467	pr_info("Rx descriptor cache in 64bit format\n");
3468	for (i = 0x6000; i <= 0x63FF; i += 0x10) {
3469		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470			i,
3471			readl(adapter->hw.hw_addr + i+4),
3472			readl(adapter->hw.hw_addr + i),
3473			readl(adapter->hw.hw_addr + i+12),
3474			readl(adapter->hw.hw_addr + i+8));
3475	}
3476	/* tx */
3477	pr_info("Tx descriptor cache in 64bit format\n");
3478	for (i = 0x7000; i <= 0x73FF; i += 0x10) {
3479		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480			i,
3481			readl(adapter->hw.hw_addr + i+4),
3482			readl(adapter->hw.hw_addr + i),
3483			readl(adapter->hw.hw_addr + i+12),
3484			readl(adapter->hw.hw_addr + i+8));
3485	}
3486exit:
3487	return;
3488}
3489
3490/**
3491 * e1000_tx_timeout - Respond to a Tx Hang
3492 * @netdev: network interface device structure
3493 * @txqueue: number of the Tx queue that hung (unused)
3494 **/
3495static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496{
3497	struct e1000_adapter *adapter = netdev_priv(netdev);
3498
3499	/* Do the reset outside of interrupt context */
3500	adapter->tx_timeout_count++;
3501	schedule_work(&adapter->reset_task);
3502}
3503
3504static void e1000_reset_task(struct work_struct *work)
3505{
3506	struct e1000_adapter *adapter =
3507		container_of(work, struct e1000_adapter, reset_task);
3508
3509	e_err(drv, "Reset adapter\n");
3510	e1000_reinit_locked(adapter);
3511}
3512
3513/**
3514 * e1000_change_mtu - Change the Maximum Transfer Unit
3515 * @netdev: network interface device structure
3516 * @new_mtu: new value for maximum frame size
3517 *
3518 * Returns 0 on success, negative on failure
3519 **/
3520static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3521{
3522	struct e1000_adapter *adapter = netdev_priv(netdev);
3523	struct e1000_hw *hw = &adapter->hw;
3524	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3525
3526	/* Adapter-specific max frame size limits. */
3527	switch (hw->mac_type) {
3528	case e1000_undefined ... e1000_82542_rev2_1:
3529		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3530			e_err(probe, "Jumbo Frames not supported.\n");
3531			return -EINVAL;
3532		}
3533		break;
3534	default:
3535		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3536		break;
3537	}
3538
3539	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540		msleep(1);
3541	/* e1000_down has a dependency on max_frame_size */
3542	hw->max_frame_size = max_frame;
3543	if (netif_running(netdev)) {
3544		/* prevent buffers from being reallocated */
3545		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3546		e1000_down(adapter);
3547	}
3548
3549	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3550	 * means we reserve 2 more; this pushes us to allocate from the next
3551	 * larger slab size,
3552	 * i.e. RXBUFFER_2048 --> size-4096 slab.
3553	 * However, with the new *_jumbo_rx* routines, jumbo receives will
3554	 * use fragmented skbs.
3555	 */
3556
3557	if (max_frame <= E1000_RXBUFFER_2048)
3558		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3559	else
3560#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3561		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3562#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3563		adapter->rx_buffer_len = PAGE_SIZE;
3564#endif
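	/* for example, the standard 1500-byte MTU gives max_frame =
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which fits in
	 * E1000_RXBUFFER_2048; a 9000-byte jumbo MTU gives 9018 and
	 * selects a page-sized buffer via the branch above
	 */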
3565
3566	/* adjust allocation if LPE protects us, and we aren't using SBP */
3567	if (!hw->tbi_compatibility_on &&
3568	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3569	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3570		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3571
3572	netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573		   netdev->mtu, new_mtu);
3574	netdev->mtu = new_mtu;
3575
3576	if (netif_running(netdev))
3577		e1000_up(adapter);
3578	else
3579		e1000_reset(adapter);
3580
3581	clear_bit(__E1000_RESETTING, &adapter->flags);
3582
3583	return 0;
3584}
3585
3586/**
3587 * e1000_update_stats - Update the board statistics counters
3588 * @adapter: board private structure
3589 **/
3590void e1000_update_stats(struct e1000_adapter *adapter)
3591{
3592	struct net_device *netdev = adapter->netdev;
3593	struct e1000_hw *hw = &adapter->hw;
3594	struct pci_dev *pdev = adapter->pdev;
3595	unsigned long flags;
3596	u16 phy_tmp;
3597
3598#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3599
3600	/* Prevent stats update while adapter is being reset, or if the pci
3601	 * connection is down.
3602	 */
3603	if (adapter->link_speed == 0)
3604		return;
3605	if (pci_channel_offline(pdev))
3606		return;
3607
3608	spin_lock_irqsave(&adapter->stats_lock, flags);
3609
3610	/* these counters are modified from e1000_tbi_adjust_stats,
3611	 * called from the interrupt context, so they must only
3612	 * be written while holding adapter->stats_lock
3613	 */
3614
3615	adapter->stats.crcerrs += er32(CRCERRS);
3616	adapter->stats.gprc += er32(GPRC);
3617	adapter->stats.gorcl += er32(GORCL);
3618	adapter->stats.gorch += er32(GORCH);
3619	adapter->stats.bprc += er32(BPRC);
3620	adapter->stats.mprc += er32(MPRC);
3621	adapter->stats.roc += er32(ROC);
3622
3623	adapter->stats.prc64 += er32(PRC64);
3624	adapter->stats.prc127 += er32(PRC127);
3625	adapter->stats.prc255 += er32(PRC255);
3626	adapter->stats.prc511 += er32(PRC511);
3627	adapter->stats.prc1023 += er32(PRC1023);
3628	adapter->stats.prc1522 += er32(PRC1522);
3629
3630	adapter->stats.symerrs += er32(SYMERRS);
3631	adapter->stats.mpc += er32(MPC);
3632	adapter->stats.scc += er32(SCC);
3633	adapter->stats.ecol += er32(ECOL);
3634	adapter->stats.mcc += er32(MCC);
3635	adapter->stats.latecol += er32(LATECOL);
3636	adapter->stats.dc += er32(DC);
3637	adapter->stats.sec += er32(SEC);
3638	adapter->stats.rlec += er32(RLEC);
3639	adapter->stats.xonrxc += er32(XONRXC);
3640	adapter->stats.xontxc += er32(XONTXC);
3641	adapter->stats.xoffrxc += er32(XOFFRXC);
3642	adapter->stats.xofftxc += er32(XOFFTXC);
3643	adapter->stats.fcruc += er32(FCRUC);
3644	adapter->stats.gptc += er32(GPTC);
3645	adapter->stats.gotcl += er32(GOTCL);
3646	adapter->stats.gotch += er32(GOTCH);
3647	adapter->stats.rnbc += er32(RNBC);
3648	adapter->stats.ruc += er32(RUC);
3649	adapter->stats.rfc += er32(RFC);
3650	adapter->stats.rjc += er32(RJC);
3651	adapter->stats.torl += er32(TORL);
3652	adapter->stats.torh += er32(TORH);
3653	adapter->stats.totl += er32(TOTL);
3654	adapter->stats.toth += er32(TOTH);
3655	adapter->stats.tpr += er32(TPR);
3656
3657	adapter->stats.ptc64 += er32(PTC64);
3658	adapter->stats.ptc127 += er32(PTC127);
3659	adapter->stats.ptc255 += er32(PTC255);
3660	adapter->stats.ptc511 += er32(PTC511);
3661	adapter->stats.ptc1023 += er32(PTC1023);
3662	adapter->stats.ptc1522 += er32(PTC1522);
3663
3664	adapter->stats.mptc += er32(MPTC);
3665	adapter->stats.bptc += er32(BPTC);
3666
3667	/* used for adaptive IFS */
3668
3669	hw->tx_packet_delta = er32(TPT);
3670	adapter->stats.tpt += hw->tx_packet_delta;
3671	hw->collision_delta = er32(COLC);
3672	adapter->stats.colc += hw->collision_delta;
3673
3674	if (hw->mac_type >= e1000_82543) {
3675		adapter->stats.algnerrc += er32(ALGNERRC);
3676		adapter->stats.rxerrc += er32(RXERRC);
3677		adapter->stats.tncrs += er32(TNCRS);
3678		adapter->stats.cexterr += er32(CEXTERR);
3679		adapter->stats.tsctc += er32(TSCTC);
3680		adapter->stats.tsctfc += er32(TSCTFC);
3681	}
3682
3683	/* Fill out the OS statistics structure */
3684	netdev->stats.multicast = adapter->stats.mprc;
3685	netdev->stats.collisions = adapter->stats.colc;
3686
3687	/* Rx Errors */
3688
3689	/* RLEC on some newer hardware can be incorrect so build
3690	 * our own version based on RUC and ROC
3691	 */
3692	netdev->stats.rx_errors = adapter->stats.rxerrc +
3693		adapter->stats.crcerrs + adapter->stats.algnerrc +
3694		adapter->stats.ruc + adapter->stats.roc +
3695		adapter->stats.cexterr;
3696	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3697	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3698	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3699	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3700	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3701
3702	/* Tx Errors */
3703	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3704	netdev->stats.tx_errors = adapter->stats.txerrc;
3705	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3706	netdev->stats.tx_window_errors = adapter->stats.latecol;
3707	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3708	if (hw->bad_tx_carr_stats_fd &&
3709	    adapter->link_duplex == FULL_DUPLEX) {
3710		netdev->stats.tx_carrier_errors = 0;
3711		adapter->stats.tncrs = 0;
3712	}
3713
3714	/* Tx Dropped needs to be maintained elsewhere */
3715
3716	/* Phy Stats */
3717	if (hw->media_type == e1000_media_type_copper) {
3718		if ((adapter->link_speed == SPEED_1000) &&
3719		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3721			adapter->phy_stats.idle_errors += phy_tmp;
3722		}
3723
3724		if ((hw->mac_type <= e1000_82546) &&
3725		   (hw->phy_type == e1000_phy_m88) &&
3726		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727			adapter->phy_stats.receive_errors += phy_tmp;
3728	}
3729
3730	/* Management Stats */
3731	if (hw->has_smbus) {
3732		adapter->stats.mgptc += er32(MGTPTC);
3733		adapter->stats.mgprc += er32(MGTPRC);
3734		adapter->stats.mgpdc += er32(MGTPDC);
3735	}
3736
3737	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738}
3739
3740/**
3741 * e1000_intr - Interrupt Handler
3742 * @irq: interrupt number
3743 * @data: pointer to a network interface device structure
3744 **/
3745static irqreturn_t e1000_intr(int irq, void *data)
3746{
3747	struct net_device *netdev = data;
3748	struct e1000_adapter *adapter = netdev_priv(netdev);
3749	struct e1000_hw *hw = &adapter->hw;
3750	u32 icr = er32(ICR);
3751
3752	if (unlikely((!icr)))
3753		return IRQ_NONE;  /* Not our interrupt */
3754
3755	/* we might have caused the interrupt, but the above
3756	 * read cleared it; just in case the driver is down,
3757	 * there is nothing to do, so return handled
3758	 */
3759	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760		return IRQ_HANDLED;
3761
3762	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763		hw->get_link_status = 1;
3764		/* guard against interrupt when we're going down */
3765		if (!test_bit(__E1000_DOWN, &adapter->flags))
3766			schedule_delayed_work(&adapter->watchdog_task, 1);
3767	}
3768
3769	/* disable interrupts, without the synchronize_irq bit */
3770	ew32(IMC, ~0);
3771	E1000_WRITE_FLUSH();
3772
3773	if (likely(napi_schedule_prep(&adapter->napi))) {
3774		adapter->total_tx_bytes = 0;
3775		adapter->total_tx_packets = 0;
3776		adapter->total_rx_bytes = 0;
3777		adapter->total_rx_packets = 0;
3778		__napi_schedule(&adapter->napi);
3779	} else {
3780		/* this really should not happen! if it does it is basically a
3781		 * bug, but not a hard error, so enable ints and continue
3782		 */
3783		if (!test_bit(__E1000_DOWN, &adapter->flags))
3784			e1000_irq_enable(adapter);
3785	}
3786
3787	return IRQ_HANDLED;
3788}
3789
3790/**
3791 * e1000_clean - NAPI Rx polling callback
3792 * @napi: napi struct containing references to driver info
3793 * @budget: budget given to driver for receive packets
3794 **/
3795static int e1000_clean(struct napi_struct *napi, int budget)
3796{
3797	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3798						     napi);
3799	int tx_clean_complete = 0, work_done = 0;
3800
3801	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802
3803	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804
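	/* if Tx work remains or the Rx budget was exhausted, returning the
	 * full budget (without completing NAPI) keeps this instance
	 * scheduled so the core polls us again
	 */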
3805	if (!tx_clean_complete || work_done == budget)
3806		return budget;
3807
3808	/* Exit the polling mode, but don't re-enable interrupts if stack might
3809	 * poll us due to busy-polling
3810	 */
3811	if (likely(napi_complete_done(napi, work_done))) {
3812		if (likely(adapter->itr_setting & 3))
3813			e1000_set_itr(adapter);
3814		if (!test_bit(__E1000_DOWN, &adapter->flags))
3815			e1000_irq_enable(adapter);
3816	}
3817
3818	return work_done;
3819}
3820
3821/**
3822 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3823 * @adapter: board private structure
3824 * @tx_ring: ring to clean
3825 **/
3826static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3827			       struct e1000_tx_ring *tx_ring)
3828{
3829	struct e1000_hw *hw = &adapter->hw;
3830	struct net_device *netdev = adapter->netdev;
3831	struct e1000_tx_desc *tx_desc, *eop_desc;
3832	struct e1000_tx_buffer *buffer_info;
3833	unsigned int i, eop;
3834	unsigned int count = 0;
3835	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3836	unsigned int bytes_compl = 0, pkts_compl = 0;
3837
3838	i = tx_ring->next_to_clean;
3839	eop = tx_ring->buffer_info[i].next_to_watch;
3840	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3841
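	/* clean one packet per outer iteration: wait for the packet's last
	 * descriptor (next_to_watch) to report DD, then release every
	 * buffer up to and including it in the inner loop
	 */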
3842	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3843	       (count < tx_ring->count)) {
3844		bool cleaned = false;
3845		dma_rmb();	/* read buffer_info after eop_desc */
3846		for ( ; !cleaned; count++) {
3847			tx_desc = E1000_TX_DESC(*tx_ring, i);
3848			buffer_info = &tx_ring->buffer_info[i];
3849			cleaned = (i == eop);
3850
3851			if (cleaned) {
3852				total_tx_packets += buffer_info->segs;
3853				total_tx_bytes += buffer_info->bytecount;
3854				if (buffer_info->skb) {
3855					bytes_compl += buffer_info->skb->len;
3856					pkts_compl++;
3857				}
3858
3859			}
3860			e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3861							 64);
3862			tx_desc->upper.data = 0;
3863
3864			if (unlikely(++i == tx_ring->count))
3865				i = 0;
3866		}
3867
3868		eop = tx_ring->buffer_info[i].next_to_watch;
3869		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870	}
3871
3872	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3873	 * which will reuse the cleaned buffers.
3874	 */
3875	smp_store_release(&tx_ring->next_to_clean, i);
3876
3877	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3878
3879#define TX_WAKE_THRESHOLD 32
3880	if (unlikely(count && netif_carrier_ok(netdev) &&
3881		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3882		/* Make sure that anybody stopping the queue after this
3883		 * sees the new next_to_clean.
3884		 */
3885		smp_mb();
3886
3887		if (netif_queue_stopped(netdev) &&
3888		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3889			netif_wake_queue(netdev);
3890			++adapter->restart_queue;
3891		}
3892	}
3893
3894	if (adapter->detect_tx_hung) {
3895		/* Detect a transmit hang in hardware, this serializes the
3896		 * check with the clearing of time_stamp and movement of i
3897		 */
3898		adapter->detect_tx_hung = false;
3899		if (tx_ring->buffer_info[eop].time_stamp &&
3900		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3901			       (adapter->tx_timeout_factor * HZ)) &&
3902		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3903
3904			/* detected Tx unit hang */
3905			e_err(drv, "Detected Tx Unit Hang\n"
3906			      "  Tx Queue             <%lu>\n"
3907			      "  TDH                  <%x>\n"
3908			      "  TDT                  <%x>\n"
3909			      "  next_to_use          <%x>\n"
3910			      "  next_to_clean        <%x>\n"
3911			      "buffer_info[next_to_clean]\n"
3912			      "  time_stamp           <%lx>\n"
3913			      "  next_to_watch        <%x>\n"
3914			      "  jiffies              <%lx>\n"
3915			      "  next_to_watch.status <%x>\n",
3916				(unsigned long)(tx_ring - adapter->tx_ring),
3917				readl(hw->hw_addr + tx_ring->tdh),
3918				readl(hw->hw_addr + tx_ring->tdt),
3919				tx_ring->next_to_use,
3920				tx_ring->next_to_clean,
3921				tx_ring->buffer_info[eop].time_stamp,
3922				eop,
3923				jiffies,
3924				eop_desc->upper.fields.status);
3925			e1000_dump(adapter);
3926			netif_stop_queue(netdev);
3927		}
3928	}
3929	adapter->total_tx_bytes += total_tx_bytes;
3930	adapter->total_tx_packets += total_tx_packets;
3931	netdev->stats.tx_bytes += total_tx_bytes;
3932	netdev->stats.tx_packets += total_tx_packets;
3933	return count < tx_ring->count;
3934}
3935
3936/**
3937 * e1000_rx_checksum - Receive Checksum Offload for 82543
3938 * @adapter:     board private structure
3939 * @status_err:  receive descriptor status and error fields
3940 * @csum:        receive descriptor csum field
3941 * @skb:         socket buffer with received data
3942 **/
3943static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3944			      u32 csum, struct sk_buff *skb)
3945{
3946	struct e1000_hw *hw = &adapter->hw;
3947	u16 status = (u16)status_err;
3948	u8 errors = (u8)(status_err >> 24);
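	/* callers pack the descriptor's status byte into the low bits of
	 * status_err and its errors byte into bits 31:24; unpacked here
	 */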
3949
3950	skb_checksum_none_assert(skb);
3951
3952	/* 82543 or newer only */
3953	if (unlikely(hw->mac_type < e1000_82543))
3954		return;
3955	/* Ignore Checksum bit is set */
3956	if (unlikely(status & E1000_RXD_STAT_IXSM))
3957		return;
3958	/* TCP/UDP checksum error bit is set */
3959	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3960		/* let the stack verify checksum errors */
3961		adapter->hw_csum_err++;
3962		return;
3963	}
3964	/* TCP/UDP Checksum has not been calculated */
3965	if (!(status & E1000_RXD_STAT_TCPCS))
3966		return;
3967
3968	/* It must be a TCP or UDP packet with a valid checksum */
3969	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3970		/* TCP checksum is good */
3971		skb->ip_summed = CHECKSUM_UNNECESSARY;
3972	}
3973	adapter->hw_csum_good++;
3974}
3975
3976/**
3977 * e1000_consume_page - helper function for jumbo Rx path
3978 * @bi: software descriptor shadow data
3979 * @skb: skb being modified
3980 * @length: length of data being added
3981 **/
3982static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3983			       u16 length)
3984{
3985	bi->rxbuf.page = NULL;
3986	skb->len += length;
3987	skb->data_len += length;
3988	skb->truesize += PAGE_SIZE;
3989}
3990
3991/**
3992 * e1000_receive_skb - helper function to handle rx indications
3993 * @adapter: board private structure
3994 * @status: descriptor status field as written by hardware
3995 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3996 * @skb: pointer to sk_buff to be indicated to stack
3997 */
3998static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3999			      __le16 vlan, struct sk_buff *skb)
4000{
4001	skb->protocol = eth_type_trans(skb, adapter->netdev);
4002
4003	if (status & E1000_RXD_STAT_VP) {
4004		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4005
4006		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007	}
4008	napi_gro_receive(&adapter->napi, skb);
4009}
4010
4011/**
4012 * e1000_tbi_adjust_stats
4013 * @hw: Struct containing variables accessed by shared code
4014 * @stats: point to stats struct
4015 * @frame_len: The length of the frame in question
4016 * @mac_addr: The Ethernet destination address of the frame in question
4017 *
4018 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4019 */
4020static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4021				   struct e1000_hw_stats *stats,
4022				   u32 frame_len, const u8 *mac_addr)
4023{
4024	u64 carry_bit;
4025
4026	/* First adjust the frame length. */
4027	frame_len--;
4028	/* We need to adjust the statistics counters, since the hardware
4029	 * counters overcount this packet as a CRC error and undercount
4030	 * the packet as a good packet
4031	 */
4032	/* This packet should not be counted as a CRC error. */
4033	stats->crcerrs--;
4034	/* This packet does count as a Good Packet Received. */
4035	stats->gprc++;
4036
4037	/* Adjust the Good Octets received counters */
4038	carry_bit = 0x80000000 & stats->gorcl;
4039	stats->gorcl += frame_len;
4040	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4041	 * Received Count) was one before the addition,
4042	 * AND it is zero after, then we lost the carry out,
4043	 * need to add one to Gorch (Good Octets Received Count High).
4044	 * This could be simplified if all environments supported
4045	 * 64-bit integers.
4046	 */
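	/* e.g. if bit 31 of gorcl was set before the addition and is clear
	 * afterwards, the carry out of the low 32 bits was lost and is
	 * folded into gorch below
	 */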
4047	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4048		stats->gorch++;
4049	/* Is this a broadcast or multicast?  Check broadcast first,
4050	 * since the test for a multicast frame will test positive on
4051	 * a broadcast frame.
4052	 */
4053	if (is_broadcast_ether_addr(mac_addr))
4054		stats->bprc++;
4055	else if (is_multicast_ether_addr(mac_addr))
4056		stats->mprc++;
4057
4058	if (frame_len == hw->max_frame_size) {
4059		/* In this case, the hardware has overcounted the number of
4060		 * oversize frames.
4061		 */
4062		if (stats->roc > 0)
4063			stats->roc--;
4064	}
4065
4066	/* Adjust the bin counters when the extra byte put the frame in the
4067	 * wrong bin. Remember that the frame_len was adjusted above.
4068	 */
4069	if (frame_len == 64) {
4070		stats->prc64++;
4071		stats->prc127--;
4072	} else if (frame_len == 127) {
4073		stats->prc127++;
4074		stats->prc255--;
4075	} else if (frame_len == 255) {
4076		stats->prc255++;
4077		stats->prc511--;
4078	} else if (frame_len == 511) {
4079		stats->prc511++;
4080		stats->prc1023--;
4081	} else if (frame_len == 1023) {
4082		stats->prc1023++;
4083		stats->prc1522--;
4084	} else if (frame_len == 1522) {
4085		stats->prc1522++;
4086	}
4087}
4088
4089static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4090				    u8 status, u8 errors,
4091				    u32 length, const u8 *data)
4092{
4093	struct e1000_hw *hw = &adapter->hw;
4094	u8 last_byte = *(data + length - 1);
4095
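	/* TBI_ACCEPT implements the TBI-mode workaround: a frame flagged
	 * with errors may still be acceptable depending on its last byte
	 * (presumably a carrier-extension symbol); if so, fix up the
	 * hardware counters that miscounted it
	 */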
4096	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4097		unsigned long irq_flags;
4098
4099		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4100		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4101		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4102
4103		return true;
4104	}
4105
4106	return false;
4107}
4108
4109static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4110					  unsigned int bufsz)
4111{
4112	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4113
4114	if (unlikely(!skb))
4115		adapter->alloc_rx_buff_failed++;
4116	return skb;
4117}
4118
4119/**
4120 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4121 * @adapter: board private structure
4122 * @rx_ring: ring to clean
4123 * @work_done: amount of napi work completed this call
4124 * @work_to_do: max amount of work allowed for this call to do
4125 *
4126 * the return value indicates whether actual cleaning was done; there
4127 * is no guarantee that everything was cleaned
4128 */
4129static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4130				     struct e1000_rx_ring *rx_ring,
4131				     int *work_done, int work_to_do)
4132{
4133	struct net_device *netdev = adapter->netdev;
4134	struct pci_dev *pdev = adapter->pdev;
4135	struct e1000_rx_desc *rx_desc, *next_rxd;
4136	struct e1000_rx_buffer *buffer_info, *next_buffer;
4137	u32 length;
4138	unsigned int i;
4139	int cleaned_count = 0;
4140	bool cleaned = false;
4141	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4142
4143	i = rx_ring->next_to_clean;
4144	rx_desc = E1000_RX_DESC(*rx_ring, i);
4145	buffer_info = &rx_ring->buffer_info[i];
4146
4147	while (rx_desc->status & E1000_RXD_STAT_DD) {
4148		struct sk_buff *skb;
4149		u8 status;
4150
4151		if (*work_done >= work_to_do)
4152			break;
4153		(*work_done)++;
4154		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4155
4156		status = rx_desc->status;
4157
4158		if (++i == rx_ring->count)
4159			i = 0;
4160
4161		next_rxd = E1000_RX_DESC(*rx_ring, i);
4162		prefetch(next_rxd);
4163
4164		next_buffer = &rx_ring->buffer_info[i];
4165
4166		cleaned = true;
4167		cleaned_count++;
4168		dma_unmap_page(&pdev->dev, buffer_info->dma,
4169			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4170		buffer_info->dma = 0;
4171
4172		length = le16_to_cpu(rx_desc->length);
4173
4174		/* errors is only valid for DD + EOP descriptors */
4175		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4176		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4177			u8 *mapped = page_address(buffer_info->rxbuf.page);
4178
4179			if (e1000_tbi_should_accept(adapter, status,
4180						    rx_desc->errors,
4181						    length, mapped)) {
4182				length--;
4183			} else if (netdev->features & NETIF_F_RXALL) {
4184				goto process_skb;
4185			} else {
4186				/* an error means any chain goes out the window
4187				 * too
4188				 */
4189				dev_kfree_skb(rx_ring->rx_skb_top);
4190				rx_ring->rx_skb_top = NULL;
4191				goto next_desc;
4192			}
4193		}
4194
4195#define rxtop rx_ring->rx_skb_top
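		/* rxtop aliases the skb accumulating page fragments for an
		 * in-progress multi-descriptor packet across iterations
		 */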
4196process_skb:
4197		if (!(status & E1000_RXD_STAT_EOP)) {
4198			/* this descriptor is only the beginning (or middle) */
4199			if (!rxtop) {
4200				/* this is the beginning of a chain */
4201				rxtop = napi_get_frags(&adapter->napi);
4202				if (!rxtop)
4203					break;
4204
4205				skb_fill_page_desc(rxtop, 0,
4206						   buffer_info->rxbuf.page,
4207						   0, length);
4208			} else {
4209				/* this is the middle of a chain */
4210				skb_fill_page_desc(rxtop,
4211				    skb_shinfo(rxtop)->nr_frags,
4212				    buffer_info->rxbuf.page, 0, length);
4213			}
4214			e1000_consume_page(buffer_info, rxtop, length);
4215			goto next_desc;
4216		} else {
4217			if (rxtop) {
4218				/* end of the chain */
4219				skb_fill_page_desc(rxtop,
4220				    skb_shinfo(rxtop)->nr_frags,
4221				    buffer_info->rxbuf.page, 0, length);
4222				skb = rxtop;
4223				rxtop = NULL;
4224				e1000_consume_page(buffer_info, skb, length);
4225			} else {
4226				struct page *p;
4227				/* no chain, got EOP, this buf is the packet;
4228				 * use copybreak to save the put_page/alloc_page
4229				 */
4230				p = buffer_info->rxbuf.page;
4231				if (length <= copybreak) {
4232					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4233						length -= 4;
4234					skb = e1000_alloc_rx_skb(adapter,
4235								 length);
4236					if (!skb)
4237						break;
4238
4239					memcpy(skb_tail_pointer(skb),
4240					       page_address(p), length);
4241
4242					/* re-use the page, so don't erase
4243					 * buffer_info->rxbuf.page
4244					 */
4245					skb_put(skb, length);
4246					e1000_rx_checksum(adapter,
4247							  status | rx_desc->errors << 24,
4248							  le16_to_cpu(rx_desc->csum), skb);
4249
4250					total_rx_bytes += skb->len;
4251					total_rx_packets++;
4252
4253					e1000_receive_skb(adapter, status,
4254							  rx_desc->special, skb);
4255					goto next_desc;
4256				} else {
4257					skb = napi_get_frags(&adapter->napi);
4258					if (!skb) {
4259						adapter->alloc_rx_buff_failed++;
4260						break;
4261					}
4262					skb_fill_page_desc(skb, 0, p, 0,
4263							   length);
4264					e1000_consume_page(buffer_info, skb,
4265							   length);
4266				}
4267			}
4268		}
4269
4270		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4271		e1000_rx_checksum(adapter,
4272				  (u32)(status) |
4273				  ((u32)(rx_desc->errors) << 24),
4274				  le16_to_cpu(rx_desc->csum), skb);
4275
4276		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4277		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4278			pskb_trim(skb, skb->len - 4);
4279		total_rx_packets++;
4280
4281		if (status & E1000_RXD_STAT_VP) {
4282			__le16 vlan = rx_desc->special;
4283			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4284
4285			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4286		}
4287
4288		napi_gro_frags(&adapter->napi);
4289
4290next_desc:
4291		rx_desc->status = 0;
4292
4293		/* return some buffers to hardware, one at a time is too slow */
4294		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4295			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4296			cleaned_count = 0;
4297		}
4298
4299		/* use prefetched values */
4300		rx_desc = next_rxd;
4301		buffer_info = next_buffer;
4302	}
4303	rx_ring->next_to_clean = i;
4304
4305	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4306	if (cleaned_count)
4307		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4308
4309	adapter->total_rx_packets += total_rx_packets;
4310	adapter->total_rx_bytes += total_rx_bytes;
4311	netdev->stats.rx_bytes += total_rx_bytes;
4312	netdev->stats.rx_packets += total_rx_packets;
4313	return cleaned;
4314}
4315
4316/* this should improve performance for small packets with large amounts
4317 * of reassembly being done in the stack
4318 */
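/* Frames at or below the copybreak threshold are memcpy'd into a fresh
 * small skb so the original DMA buffer stays mapped and can be reused;
 * larger frames return NULL here and take the zero-copy path instead.
 */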
4319static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4320				       struct e1000_rx_buffer *buffer_info,
4321				       u32 length, const void *data)
4322{
4323	struct sk_buff *skb;
4324
4325	if (length > copybreak)
4326		return NULL;
4327
4328	skb = e1000_alloc_rx_skb(adapter, length);
4329	if (!skb)
4330		return NULL;
4331
4332	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4333				length, DMA_FROM_DEVICE);
4334
4335	skb_put_data(skb, data, length);
4336
4337	return skb;
4338}
4339
4340/**
4341 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4342 * @adapter: board private structure
4343 * @rx_ring: ring to clean
4344 * @work_done: amount of napi work completed this call
4345 * @work_to_do: max amount of work allowed for this call to do
4346 */
4347static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4348			       struct e1000_rx_ring *rx_ring,
4349			       int *work_done, int work_to_do)
4350{
4351	struct net_device *netdev = adapter->netdev;
4352	struct pci_dev *pdev = adapter->pdev;
4353	struct e1000_rx_desc *rx_desc, *next_rxd;
4354	struct e1000_rx_buffer *buffer_info, *next_buffer;
4355	u32 length;
4356	unsigned int i;
4357	int cleaned_count = 0;
4358	bool cleaned = false;
4359	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4360
4361	i = rx_ring->next_to_clean;
4362	rx_desc = E1000_RX_DESC(*rx_ring, i);
4363	buffer_info = &rx_ring->buffer_info[i];
4364
4365	while (rx_desc->status & E1000_RXD_STAT_DD) {
4366		struct sk_buff *skb;
4367		u8 *data;
4368		u8 status;
4369
4370		if (*work_done >= work_to_do)
4371			break;
4372		(*work_done)++;
4373		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4374
4375		status = rx_desc->status;
4376		length = le16_to_cpu(rx_desc->length);
4377
4378		data = buffer_info->rxbuf.data;
4379		prefetch(data);
4380		skb = e1000_copybreak(adapter, buffer_info, length, data);
4381		if (!skb) {
4382			unsigned int frag_len = e1000_frag_len(adapter);
4383
4384			skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4385			if (!skb) {
4386				adapter->alloc_rx_buff_failed++;
4387				break;
4388			}
4389
4390			skb_reserve(skb, E1000_HEADROOM);
4391			dma_unmap_single(&pdev->dev, buffer_info->dma,
4392					 adapter->rx_buffer_len,
4393					 DMA_FROM_DEVICE);
4394			buffer_info->dma = 0;
4395			buffer_info->rxbuf.data = NULL;
4396		}
4397
4398		if (++i == rx_ring->count)
4399			i = 0;
4400
4401		next_rxd = E1000_RX_DESC(*rx_ring, i);
4402		prefetch(next_rxd);
4403
4404		next_buffer = &rx_ring->buffer_info[i];
4405
4406		cleaned = true;
4407		cleaned_count++;
4408
4409		/* !EOP means multiple descriptors were used to store a single
4410		 * packet, if that's the case we need to toss it.  In fact, we
4411		 * need to toss every packet with the EOP bit clear and the next
4412		 * frame that _does_ have the EOP bit set, as it is by
4413		 * definition only a frame fragment
4414		 */
4415		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4416			adapter->discarding = true;
4417
4418		if (adapter->discarding) {
4419			/* All receives must fit into a single buffer */
4420			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4421			dev_kfree_skb(skb);
4422			if (status & E1000_RXD_STAT_EOP)
4423				adapter->discarding = false;
4424			goto next_desc;
4425		}
4426
4427		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4428			if (e1000_tbi_should_accept(adapter, status,
4429						    rx_desc->errors,
4430						    length, data)) {
4431				length--;
4432			} else if (netdev->features & NETIF_F_RXALL) {
4433				goto process_skb;
4434			} else {
4435				dev_kfree_skb(skb);
4436				goto next_desc;
4437			}
4438		}
4439
4440process_skb:
4441		total_rx_bytes += (length - 4); /* don't count FCS */
4442		total_rx_packets++;
4443
4444		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4445			/* adjust length to remove Ethernet CRC, this must be
4446			 * done after the TBI_ACCEPT workaround above
4447			 */
4448			length -= 4;
4449
4450		if (buffer_info->rxbuf.data == NULL)
4451			skb_put(skb, length);
4452		else /* copybreak skb */
4453			skb_trim(skb, length);
4454
4455		/* Receive Checksum Offload */
4456		e1000_rx_checksum(adapter,
4457				  (u32)(status) |
4458				  ((u32)(rx_desc->errors) << 24),
4459				  le16_to_cpu(rx_desc->csum), skb);
4460
4461		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4462
4463next_desc:
4464		rx_desc->status = 0;
4465
4466		/* return some buffers to hardware, one at a time is too slow */
4467		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4468			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4469			cleaned_count = 0;
4470		}
4471
4472		/* use prefetched values */
4473		rx_desc = next_rxd;
4474		buffer_info = next_buffer;
4475	}
4476	rx_ring->next_to_clean = i;
4477
4478	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4479	if (cleaned_count)
4480		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4481
4482	adapter->total_rx_packets += total_rx_packets;
4483	adapter->total_rx_bytes += total_rx_bytes;
4484	netdev->stats.rx_bytes += total_rx_bytes;
4485	netdev->stats.rx_packets += total_rx_packets;
4486	return cleaned;
4487}
4488
4489/**
4490 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4491 * @adapter: address of board private structure
4492 * @rx_ring: pointer to receive ring structure
4493 * @cleaned_count: number of buffers to allocate this pass
4494 **/
4495static void
4496e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4497			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4498{
4499	struct pci_dev *pdev = adapter->pdev;
4500	struct e1000_rx_desc *rx_desc;
4501	struct e1000_rx_buffer *buffer_info;
4502	unsigned int i;
4503
4504	i = rx_ring->next_to_use;
4505	buffer_info = &rx_ring->buffer_info[i];
4506
4507	while (cleaned_count--) {
4508		/* allocate a new page if necessary */
4509		if (!buffer_info->rxbuf.page) {
4510			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4511			if (unlikely(!buffer_info->rxbuf.page)) {
4512				adapter->alloc_rx_buff_failed++;
4513				break;
4514			}
4515		}
4516
4517		if (!buffer_info->dma) {
4518			buffer_info->dma = dma_map_page(&pdev->dev,
4519							buffer_info->rxbuf.page, 0,
4520							adapter->rx_buffer_len,
4521							DMA_FROM_DEVICE);
4522			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4523				put_page(buffer_info->rxbuf.page);
4524				buffer_info->rxbuf.page = NULL;
4525				buffer_info->dma = 0;
4526				adapter->alloc_rx_buff_failed++;
4527				break;
4528			}
4529		}
4530
4531		rx_desc = E1000_RX_DESC(*rx_ring, i);
4532		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4533
4534		if (unlikely(++i == rx_ring->count))
4535			i = 0;
4536		buffer_info = &rx_ring->buffer_info[i];
4537	}
4538
4539	if (likely(rx_ring->next_to_use != i)) {
4540		rx_ring->next_to_use = i;
4541		if (unlikely(i-- == 0))
4542			i = (rx_ring->count - 1);
4543
4544		/* Force memory writes to complete before letting h/w
4545		 * know there are new descriptors to fetch.  (Only
4546		 * applicable for weak-ordered memory model archs,
4547		 * such as IA-64).
4548		 */
4549		dma_wmb();
4550		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4551	}
4552}
4553
4554/**
4555 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4556 * @adapter: address of board private structure
4557 * @rx_ring: pointer to ring struct
4558 * @cleaned_count: number of new Rx buffers to try to allocate
4559 **/
4560static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4561				   struct e1000_rx_ring *rx_ring,
4562				   int cleaned_count)
4563{
4564	struct e1000_hw *hw = &adapter->hw;
4565	struct pci_dev *pdev = adapter->pdev;
4566	struct e1000_rx_desc *rx_desc;
4567	struct e1000_rx_buffer *buffer_info;
4568	unsigned int i;
4569	unsigned int bufsz = adapter->rx_buffer_len;
4570
4571	i = rx_ring->next_to_use;
4572	buffer_info = &rx_ring->buffer_info[i];
4573
4574	while (cleaned_count--) {
4575		void *data;
4576
4577		if (buffer_info->rxbuf.data)
4578			goto skip;
4579
4580		data = e1000_alloc_frag(adapter);
4581		if (!data) {
4582			/* Better luck next round */
4583			adapter->alloc_rx_buff_failed++;
4584			break;
4585		}
4586
4587		/* Fix for errata 23, can't cross 64kB boundary */
4588		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4589			void *olddata = data;
4590			e_err(rx_err, "skb align check failed: %u bytes at "
4591			      "%p\n", bufsz, data);
4592			/* Try again, without freeing the previous */
4593			data = e1000_alloc_frag(adapter);
4594			/* Failed allocation, critical failure */
4595			if (!data) {
4596				skb_free_frag(olddata);
4597				adapter->alloc_rx_buff_failed++;
4598				break;
4599			}
4600
4601			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4602				/* give up */
4603				skb_free_frag(data);
4604				skb_free_frag(olddata);
4605				adapter->alloc_rx_buff_failed++;
4606				break;
4607			}
4608
4609			/* Use new allocation */
4610			skb_free_frag(olddata);
4611		}
4612		buffer_info->dma = dma_map_single(&pdev->dev,
4613						  data,
4614						  adapter->rx_buffer_len,
4615						  DMA_FROM_DEVICE);
4616		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4617			skb_free_frag(data);
4618			buffer_info->dma = 0;
4619			adapter->alloc_rx_buff_failed++;
4620			break;
4621		}
4622
4623		/* XXX if it was allocated cleanly it will never map to a
4624		 * boundary crossing
4625		 */
4626
4627		/* Fix for errata 23, can't cross 64kB boundary */
4628		if (!e1000_check_64k_bound(adapter,
4629					(void *)(unsigned long)buffer_info->dma,
4630					adapter->rx_buffer_len)) {
4631			e_err(rx_err, "dma align check failed: %u bytes at "
4632			      "%p\n", adapter->rx_buffer_len,
4633			      (void *)(unsigned long)buffer_info->dma);
4634
4635			dma_unmap_single(&pdev->dev, buffer_info->dma,
4636					 adapter->rx_buffer_len,
4637					 DMA_FROM_DEVICE);
4638
4639			skb_free_frag(data);
4640			buffer_info->rxbuf.data = NULL;
4641			buffer_info->dma = 0;
4642
4643			adapter->alloc_rx_buff_failed++;
4644			break;
4645		}
4646		buffer_info->rxbuf.data = data;
4647 skip:
4648		rx_desc = E1000_RX_DESC(*rx_ring, i);
4649		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4650
4651		if (unlikely(++i == rx_ring->count))
4652			i = 0;
4653		buffer_info = &rx_ring->buffer_info[i];
4654	}
4655
4656	if (likely(rx_ring->next_to_use != i)) {
4657		rx_ring->next_to_use = i;
4658		if (unlikely(i-- == 0))
4659			i = (rx_ring->count - 1);
4660
4661		/* Force memory writes to complete before letting h/w
4662		 * know there are new descriptors to fetch.  (Only
4663		 * applicable for weak-ordered memory model archs,
4664		 * such as IA-64).
4665		 */
4666		dma_wmb();
4667		writel(i, hw->hw_addr + rx_ring->rdt);
4668	}
4669}
4670
4671/**
4672 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4673 * @adapter: address of board private structure
4674 **/
4675static void e1000_smartspeed(struct e1000_adapter *adapter)
4676{
4677	struct e1000_hw *hw = &adapter->hw;
4678	u16 phy_status;
4679	u16 phy_ctrl;
4680
4681	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4682	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4683		return;
4684
4685	if (adapter->smartspeed == 0) {
4686		/* If Master/Slave config fault is asserted twice,
4687		 * we assume back-to-back
4688		 */
4689		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4690		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4691			return;
4692		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4693		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4694			return;
4695		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4696		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4697			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4698			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4699					    phy_ctrl);
4700			adapter->smartspeed++;
4701			if (!e1000_phy_setup_autoneg(hw) &&
4702			   !e1000_read_phy_reg(hw, PHY_CTRL,
4703					       &phy_ctrl)) {
4704				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4705					     MII_CR_RESTART_AUTO_NEG);
4706				e1000_write_phy_reg(hw, PHY_CTRL,
4707						    phy_ctrl);
4708			}
4709		}
4710		return;
4711	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4712		/* If still no link, perhaps using 2/3 pair cable */
4713		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4714		phy_ctrl |= CR_1000T_MS_ENABLE;
4715		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4716		if (!e1000_phy_setup_autoneg(hw) &&
4717		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4718			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4719				     MII_CR_RESTART_AUTO_NEG);
4720			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4721		}
4722	}
4723	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4724	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4725		adapter->smartspeed = 0;
4726}
4727
4728/**
4729 * e1000_ioctl - handle ioctl calls
4730 * @netdev: pointer to our netdev
4731 * @ifr: pointer to interface request structure
4732 * @cmd: ioctl data
4733 **/
4734static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4735{
4736	switch (cmd) {
4737	case SIOCGMIIPHY:
4738	case SIOCGMIIREG:
4739	case SIOCSMIIREG:
4740		return e1000_mii_ioctl(netdev, ifr, cmd);
4741	default:
4742		return -EOPNOTSUPP;
4743	}
4744}
4745
4746/**
4747 * e1000_mii_ioctl -
4748 * @netdev: pointer to our netdev
4749 * @ifr: pointer to interface request structure
4750 * @cmd: ioctl data
4751 **/
4752static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4753			   int cmd)
4754{
4755	struct e1000_adapter *adapter = netdev_priv(netdev);
4756	struct e1000_hw *hw = &adapter->hw;
4757	struct mii_ioctl_data *data = if_mii(ifr);
4758	int retval;
4759	u16 mii_reg;
4760	unsigned long flags;
4761
4762	if (hw->media_type != e1000_media_type_copper)
4763		return -EOPNOTSUPP;
4764
4765	switch (cmd) {
4766	case SIOCGMIIPHY:
4767		data->phy_id = hw->phy_addr;
4768		break;
4769	case SIOCGMIIREG:
4770		spin_lock_irqsave(&adapter->stats_lock, flags);
4771		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4772				   &data->val_out)) {
4773			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4774			return -EIO;
4775		}
4776		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777		break;
4778	case SIOCSMIIREG:
4779		if (data->reg_num & ~(0x1F))
4780			return -EFAULT;
4781		mii_reg = data->val_in;
4782		spin_lock_irqsave(&adapter->stats_lock, flags);
4783		if (e1000_write_phy_reg(hw, data->reg_num,
4784					mii_reg)) {
4785			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4786			return -EIO;
4787		}
4788		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4789		if (hw->media_type == e1000_media_type_copper) {
4790			switch (data->reg_num) {
4791			case PHY_CTRL:
4792				if (mii_reg & MII_CR_POWER_DOWN)
4793					break;
4794				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4795					hw->autoneg = 1;
4796					hw->autoneg_advertised = 0x2F;
4797				} else {
4798					u32 speed;
4799					if (mii_reg & 0x40)
4800						speed = SPEED_1000;
4801					else if (mii_reg & 0x2000)
4802						speed = SPEED_100;
4803					else
4804						speed = SPEED_10;
4805					retval = e1000_set_spd_dplx(
4806						adapter, speed,
4807						((mii_reg & 0x100)
4808						 ? DUPLEX_FULL :
4809						 DUPLEX_HALF));
4810					if (retval)
4811						return retval;
4812				}
4813				if (netif_running(adapter->netdev))
4814					e1000_reinit_locked(adapter);
4815				else
4816					e1000_reset(adapter);
4817				break;
4818			case M88E1000_PHY_SPEC_CTRL:
4819			case M88E1000_EXT_PHY_SPEC_CTRL:
4820				if (e1000_phy_reset(hw))
4821					return -EIO;
4822				break;
4823			}
4824		} else {
4825			switch (data->reg_num) {
4826			case PHY_CTRL:
4827				if (mii_reg & MII_CR_POWER_DOWN)
4828					break;
4829				if (netif_running(adapter->netdev))
4830					e1000_reinit_locked(adapter);
4831				else
4832					e1000_reset(adapter);
4833				break;
4834			}
4835		}
4836		break;
4837	default:
4838		return -EOPNOTSUPP;
4839	}
4840	return E1000_SUCCESS;
4841}
4842
4843void e1000_pci_set_mwi(struct e1000_hw *hw)
4844{
4845	struct e1000_adapter *adapter = hw->back;
4846	int ret_val = pci_set_mwi(adapter->pdev);
4847
4848	if (ret_val)
4849		e_err(probe, "Error in setting MWI\n");
4850}
4851
4852void e1000_pci_clear_mwi(struct e1000_hw *hw)
4853{
4854	struct e1000_adapter *adapter = hw->back;
4855
4856	pci_clear_mwi(adapter->pdev);
4857}
4858
4859int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4860{
4861	struct e1000_adapter *adapter = hw->back;
4862	return pcix_get_mmrbc(adapter->pdev);
4863}
4864
4865void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4866{
4867	struct e1000_adapter *adapter = hw->back;
4868	pcix_set_mmrbc(adapter->pdev, mmrbc);
4869}
4870
4871void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4872{
4873	outl(value, port);
4874}
4875
4876static bool e1000_vlan_used(struct e1000_adapter *adapter)
4877{
4878	u16 vid;
4879
4880	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4881		return true;
4882	return false;
4883}
4884
4885static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4886			      netdev_features_t features)
4887{
4888	struct e1000_hw *hw = &adapter->hw;
4889	u32 ctrl;
4890
4891	ctrl = er32(CTRL);
4892	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4893		/* enable VLAN tag insert/strip */
4894		ctrl |= E1000_CTRL_VME;
4895	} else {
4896		/* disable VLAN tag insert/strip */
4897		ctrl &= ~E1000_CTRL_VME;
4898	}
4899	ew32(CTRL, ctrl);
4900}
4901static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4902				     bool filter_on)
4903{
4904	struct e1000_hw *hw = &adapter->hw;
4905	u32 rctl;
4906
4907	if (!test_bit(__E1000_DOWN, &adapter->flags))
4908		e1000_irq_disable(adapter);
4909
4910	__e1000_vlan_mode(adapter, adapter->netdev->features);
4911	if (filter_on) {
4912		/* enable VLAN receive filtering */
4913		rctl = er32(RCTL);
4914		rctl &= ~E1000_RCTL_CFIEN;
4915		if (!(adapter->netdev->flags & IFF_PROMISC))
4916			rctl |= E1000_RCTL_VFE;
4917		ew32(RCTL, rctl);
4918		e1000_update_mng_vlan(adapter);
4919	} else {
4920		/* disable VLAN receive filtering */
4921		rctl = er32(RCTL);
4922		rctl &= ~E1000_RCTL_VFE;
4923		ew32(RCTL, rctl);
4924	}
4925
4926	if (!test_bit(__E1000_DOWN, &adapter->flags))
4927		e1000_irq_enable(adapter);
4928}
4929
4930static void e1000_vlan_mode(struct net_device *netdev,
4931			    netdev_features_t features)
4932{
4933	struct e1000_adapter *adapter = netdev_priv(netdev);
4934
4935	if (!test_bit(__E1000_DOWN, &adapter->flags))
4936		e1000_irq_disable(adapter);
4937
4938	__e1000_vlan_mode(adapter, features);
4939
4940	if (!test_bit(__E1000_DOWN, &adapter->flags))
4941		e1000_irq_enable(adapter);
4942}
4943
4944static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4945				 __be16 proto, u16 vid)
4946{
4947	struct e1000_adapter *adapter = netdev_priv(netdev);
4948	struct e1000_hw *hw = &adapter->hw;
4949	u32 vfta, index;
4950
4951	if ((hw->mng_cookie.status &
4952	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4953	    (vid == adapter->mng_vlan_id))
4954		return 0;
4955
4956	if (!e1000_vlan_used(adapter))
4957		e1000_vlan_filter_on_off(adapter, true);
4958
4959	/* add VID to filter table */
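	/* the VFTA is a 4096-bit bitmap held in 128 32-bit entries: VID
	 * bits 11:5 select the entry, bits 4:0 select the bit within it
	 */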
4960	index = (vid >> 5) & 0x7F;
4961	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4962	vfta |= (1 << (vid & 0x1F));
4963	e1000_write_vfta(hw, index, vfta);
4964
4965	set_bit(vid, adapter->active_vlans);
4966
4967	return 0;
4968}
4969
4970static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4971				  __be16 proto, u16 vid)
4972{
4973	struct e1000_adapter *adapter = netdev_priv(netdev);
4974	struct e1000_hw *hw = &adapter->hw;
4975	u32 vfta, index;
4976
4977	if (!test_bit(__E1000_DOWN, &adapter->flags))
4978		e1000_irq_disable(adapter);
4979	if (!test_bit(__E1000_DOWN, &adapter->flags))
4980		e1000_irq_enable(adapter);
4981
4982	/* remove VID from filter table */
4983	index = (vid >> 5) & 0x7F;
4984	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4985	vfta &= ~(1 << (vid & 0x1F));
4986	e1000_write_vfta(hw, index, vfta);
4987
4988	clear_bit(vid, adapter->active_vlans);
4989
4990	if (!e1000_vlan_used(adapter))
4991		e1000_vlan_filter_on_off(adapter, false);
4992
4993	return 0;
4994}
4995
4996static void e1000_restore_vlan(struct e1000_adapter *adapter)
4997{
4998	u16 vid;
4999
5000	if (!e1000_vlan_used(adapter))
5001		return;
5002
5003	e1000_vlan_filter_on_off(adapter, true);
5004	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5005		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5006}
5007
5008int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5009{
5010	struct e1000_hw *hw = &adapter->hw;
5011
5012	hw->autoneg = 0;
5013
5014	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5015	 * for the switch() below to work
5016	 */
5017	if ((spd & 1) || (dplx & ~1))
5018		goto err_inval;
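	/* with even speed values and duplex limited to 0/1, spd + dplx is
	 * a collision-free encoding, e.g. SPEED_100 + DUPLEX_FULL = 101
	 * can only mean 100 Mbps full duplex
	 */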
5019
5020	/* Fiber NICs only allow 1000 Mbps Full duplex */
5021	if ((hw->media_type == e1000_media_type_fiber) &&
5022	    spd != SPEED_1000 &&
5023	    dplx != DUPLEX_FULL)
5024		goto err_inval;
5025
5026	switch (spd + dplx) {
5027	case SPEED_10 + DUPLEX_HALF:
5028		hw->forced_speed_duplex = e1000_10_half;
5029		break;
5030	case SPEED_10 + DUPLEX_FULL:
5031		hw->forced_speed_duplex = e1000_10_full;
5032		break;
5033	case SPEED_100 + DUPLEX_HALF:
5034		hw->forced_speed_duplex = e1000_100_half;
5035		break;
5036	case SPEED_100 + DUPLEX_FULL:
5037		hw->forced_speed_duplex = e1000_100_full;
5038		break;
5039	case SPEED_1000 + DUPLEX_FULL:
5040		hw->autoneg = 1;
5041		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5042		break;
5043	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5044	default:
5045		goto err_inval;
5046	}
5047
5048	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5049	hw->mdix = AUTO_ALL_MODES;
5050
5051	return 0;
5052
5053err_inval:
5054	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5055	return -EINVAL;
5056}
5057
5058static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5059{
5060	struct net_device *netdev = pci_get_drvdata(pdev);
5061	struct e1000_adapter *adapter = netdev_priv(netdev);
5062	struct e1000_hw *hw = &adapter->hw;
5063	u32 ctrl, ctrl_ext, rctl, status;
5064	u32 wufc = adapter->wol;
5065
5066	netif_device_detach(netdev);
5067
5068	if (netif_running(netdev)) {
5069		int count = E1000_CHECK_RESET_COUNT;
5070
5071		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5072			usleep_range(10000, 20000);
5073
5074		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5075		e1000_down(adapter);
5076	}
5077
5078	status = er32(STATUS);
5079	if (status & E1000_STATUS_LU)
5080		wufc &= ~E1000_WUFC_LNKC;
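	/* link is already up, so a link-status-change event cannot be the
	 * reason to wake; drop it from the wake-up filter
	 */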
5081
5082	if (wufc) {
5083		e1000_setup_rctl(adapter);
5084		e1000_set_rx_mode(netdev);
5085
5086		rctl = er32(RCTL);
5087
5088		/* turn on all-multi mode if wake on multicast is enabled */
5089		if (wufc & E1000_WUFC_MC)
5090			rctl |= E1000_RCTL_MPE;
5091
5092		/* enable receives in the hardware */
5093		ew32(RCTL, rctl | E1000_RCTL_EN);
5094
5095		if (hw->mac_type >= e1000_82540) {
5096			ctrl = er32(CTRL);
5097			/* advertise wake from D3Cold */
5098			#define E1000_CTRL_ADVD3WUC 0x00100000
5099			/* phy power management enable */
5100			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5101			ctrl |= E1000_CTRL_ADVD3WUC |
5102				E1000_CTRL_EN_PHY_PWR_MGMT;
5103			ew32(CTRL, ctrl);
5104		}
5105
5106		if (hw->media_type == e1000_media_type_fiber ||
5107		    hw->media_type == e1000_media_type_internal_serdes) {
5108			/* keep the laser running in D3 */
5109			ctrl_ext = er32(CTRL_EXT);
5110			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5111			ew32(CTRL_EXT, ctrl_ext);
5112		}
5113
5114		ew32(WUC, E1000_WUC_PME_EN);
5115		ew32(WUFC, wufc);
5116	} else {
5117		ew32(WUC, 0);
5118		ew32(WUFC, 0);
5119	}
5120
5121	e1000_release_manageability(adapter);
5122
5123	*enable_wake = !!wufc;
5124
5125	/* make sure adapter isn't asleep if manageability is enabled */
5126	if (adapter->en_mng_pt)
5127		*enable_wake = true;
5128
5129	if (netif_running(netdev))
5130		e1000_free_irq(adapter);
5131
5132	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5133		pci_disable_device(pdev);
5134
5135	return 0;
5136}
5137
5138static int __maybe_unused e1000_suspend(struct device *dev)
5139{
5140	int retval;
5141	struct pci_dev *pdev = to_pci_dev(dev);
5142	bool wake;
5143
5144	retval = __e1000_shutdown(pdev, &wake);
5145	device_set_wakeup_enable(dev, wake);
5146
5147	return retval;
5148}
5149
5150static int __maybe_unused e1000_resume(struct device *dev)
5151{
5152	struct pci_dev *pdev = to_pci_dev(dev);
5153	struct net_device *netdev = pci_get_drvdata(pdev);
5154	struct e1000_adapter *adapter = netdev_priv(netdev);
5155	struct e1000_hw *hw = &adapter->hw;
5156	u32 err;
5157
5158	if (adapter->need_ioport)
5159		err = pci_enable_device(pdev);
5160	else
5161		err = pci_enable_device_mem(pdev);
5162	if (err) {
5163		pr_err("Cannot enable PCI device from suspend\n");
5164		return err;
5165	}
5166
5167	/* flush memory to make sure state is correct */
5168	smp_mb__before_atomic();
5169	clear_bit(__E1000_DISABLED, &adapter->flags);
5170	pci_set_master(pdev);
5171
5172	pci_enable_wake(pdev, PCI_D3hot, 0);
5173	pci_enable_wake(pdev, PCI_D3cold, 0);
5174
5175	if (netif_running(netdev)) {
5176		err = e1000_request_irq(adapter);
5177		if (err)
5178			return err;
5179	}
5180
5181	e1000_power_up_phy(adapter);
5182	e1000_reset(adapter);
5183	ew32(WUS, ~0);
5184
5185	e1000_init_manageability(adapter);
5186
5187	if (netif_running(netdev))
5188		e1000_up(adapter);
5189
5190	netif_device_attach(netdev);
5191
5192	return 0;
5193}
5194
5195static void e1000_shutdown(struct pci_dev *pdev)
5196{
5197	bool wake;
5198
5199	__e1000_shutdown(pdev, &wake);
5200
5201	if (system_state == SYSTEM_POWER_OFF) {
5202		pci_wake_from_d3(pdev, wake);
5203		pci_set_power_state(pdev, PCI_D3hot);
5204	}
5205}
5206
5207#ifdef CONFIG_NET_POLL_CONTROLLER
5208/* Polling 'interrupt' - used by things like netconsole to send skbs
5209 * without having to re-enable interrupts. It's not called while
5210 * the interrupt routine is executing.
5211 */
5212static void e1000_netpoll(struct net_device *netdev)
5213{
5214	struct e1000_adapter *adapter = netdev_priv(netdev);
5215
5216	if (disable_hardirq(adapter->pdev->irq))
5217		e1000_intr(adapter->pdev->irq, netdev);
5218	enable_irq(adapter->pdev->irq);
5219}
5220#endif
5221
5222/**
5223 * e1000_io_error_detected - called when PCI error is detected
5224 * @pdev: Pointer to PCI device
5225 * @state: The current pci connection state
5226 *
5227 * This function is called after a PCI bus error affecting
5228 * this device has been detected.
5229 */
5230static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5231						pci_channel_state_t state)
5232{
5233	struct net_device *netdev = pci_get_drvdata(pdev);
5234	struct e1000_adapter *adapter = netdev_priv(netdev);
5235
5236	netif_device_detach(netdev);
5237
5238	if (state == pci_channel_io_perm_failure)
5239		return PCI_ERS_RESULT_DISCONNECT;
5240
5241	if (netif_running(netdev))
5242		e1000_down(adapter);
5243
5244	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5245		pci_disable_device(pdev);
5246
5247	/* Request a slot reset. */
5248	return PCI_ERS_RESULT_NEED_RESET;
5249}
5250
5251/**
5252 * e1000_io_slot_reset - called after the pci bus has been reset.
5253 * @pdev: Pointer to PCI device
5254 *
5255 * Restart the card from scratch, as if from a cold-boot. Implementation
5256 * resembles the first-half of the e1000_resume routine.
5257 */
5258static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5259{
5260	struct net_device *netdev = pci_get_drvdata(pdev);
5261	struct e1000_adapter *adapter = netdev_priv(netdev);
5262	struct e1000_hw *hw = &adapter->hw;
5263	int err;
5264
5265	if (adapter->need_ioport)
5266		err = pci_enable_device(pdev);
5267	else
5268		err = pci_enable_device_mem(pdev);
5269	if (err) {
5270		pr_err("Cannot re-enable PCI device after reset.\n");
5271		return PCI_ERS_RESULT_DISCONNECT;
5272	}
5273
5274	/* flush memory to make sure state is correct */
5275	smp_mb__before_atomic();
5276	clear_bit(__E1000_DISABLED, &adapter->flags);
5277	pci_set_master(pdev);
5278
5279	pci_enable_wake(pdev, PCI_D3hot, 0);
5280	pci_enable_wake(pdev, PCI_D3cold, 0);
5281
5282	e1000_reset(adapter);
5283	ew32(WUS, ~0);
5284
5285	return PCI_ERS_RESULT_RECOVERED;
5286}
5287
5288/**
5289 * e1000_io_resume - called when traffic can start flowing again.
5290 * @pdev: Pointer to PCI device
5291 *
5292 * This callback is called when the error recovery driver tells us that
5293 * its OK to resume normal operation. Implementation resembles the
5294 * second-half of the e1000_resume routine.
5295 */
5296static void e1000_io_resume(struct pci_dev *pdev)
5297{
5298	struct net_device *netdev = pci_get_drvdata(pdev);
5299	struct e1000_adapter *adapter = netdev_priv(netdev);
5300
5301	e1000_init_manageability(adapter);
5302
5303	if (netif_running(netdev)) {
5304		if (e1000_up(adapter)) {
5305			pr_info("can't bring device back up after reset\n");
5306			return;
5307		}
5308	}
5309
5310	netif_device_attach(netdev);
5311}
5312
5313/* e1000_main.c */
 101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
 102				    struct e1000_tx_ring *txdr);
 103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
 104				    struct e1000_rx_ring *rxdr);
 105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
 106				    struct e1000_tx_ring *tx_ring);
 107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
 108				    struct e1000_rx_ring *rx_ring);
 109void e1000_update_stats(struct e1000_adapter *adapter);
 110
 111static int e1000_init_module(void);
 112static void e1000_exit_module(void);
 113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 114static void e1000_remove(struct pci_dev *pdev);
 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
 116static int e1000_sw_init(struct e1000_adapter *adapter);
 117int e1000_open(struct net_device *netdev);
 118int e1000_close(struct net_device *netdev);
 119static void e1000_configure_tx(struct e1000_adapter *adapter);
 120static void e1000_configure_rx(struct e1000_adapter *adapter);
 121static void e1000_setup_rctl(struct e1000_adapter *adapter);
 122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 125				struct e1000_tx_ring *tx_ring);
 126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 127				struct e1000_rx_ring *rx_ring);
 128static void e1000_set_rx_mode(struct net_device *netdev);
 129static void e1000_update_phy_info_task(struct work_struct *work);
 130static void e1000_watchdog(struct work_struct *work);
 131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 133				    struct net_device *netdev);
 134static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
 135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 136static int e1000_set_mac(struct net_device *netdev, void *p);
 137static irqreturn_t e1000_intr(int irq, void *data);
 138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 139			       struct e1000_tx_ring *tx_ring);
 140static int e1000_clean(struct napi_struct *napi, int budget);
 141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 142			       struct e1000_rx_ring *rx_ring,
 143			       int *work_done, int work_to_do);
 144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 145				     struct e1000_rx_ring *rx_ring,
 146				     int *work_done, int work_to_do);
 147static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 148					 struct e1000_rx_ring *rx_ring,
 149					 int cleaned_count)
 150{
 151}
 152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 153				   struct e1000_rx_ring *rx_ring,
 154				   int cleaned_count);
 155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 156					 struct e1000_rx_ring *rx_ring,
 157					 int cleaned_count);
 158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 160			   int cmd);
 161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 163static void e1000_tx_timeout(struct net_device *dev);
 164static void e1000_reset_task(struct work_struct *work);
 165static void e1000_smartspeed(struct e1000_adapter *adapter);
 166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 167				       struct sk_buff *skb);
 168
 169static bool e1000_vlan_used(struct e1000_adapter *adapter);
 170static void e1000_vlan_mode(struct net_device *netdev,
 171			    netdev_features_t features);
 172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 173				     bool filter_on);
 174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 175				 __be16 proto, u16 vid);
 176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 177				  __be16 proto, u16 vid);
 178static void e1000_restore_vlan(struct e1000_adapter *adapter);
 179
 180#ifdef CONFIG_PM
 181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 182static int e1000_resume(struct pci_dev *pdev);
 183#endif
 184static void e1000_shutdown(struct pci_dev *pdev);
 185
 186#ifdef CONFIG_NET_POLL_CONTROLLER
 187/* for netdump / net console */
 188static void e1000_netpoll(struct net_device *netdev);
 189#endif
 190
 191#define COPYBREAK_DEFAULT 256
 192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 193module_param(copybreak, uint, 0644);
 194MODULE_PARM_DESC(copybreak,
 195	"Maximum size of packet that is copied to a new buffer on receive");
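/* A minimal sketch (not driver code) of how a copybreak-style receive
 * path consumes this parameter: packets no larger than copybreak are
 * copied into a small freshly allocated skb so the original DMA buffer
 * can be recycled immediately; larger packets keep their buffer. The
 * helper name is hypothetical and the example is compiled out.
 */
#if 0 /* example only */
static struct sk_buff *rx_copybreak_example(struct sk_buff *skb,
					    unsigned int length)
{
	struct sk_buff *nskb;

	if (length > copybreak)
		return skb;	/* too big: hand the original buffer up */

	nskb = netdev_alloc_skb_ip_align(skb->dev, length);
	if (!nskb)
		return skb;	/* allocation failed: fall back */

	skb_copy_to_linear_data(nskb, skb->data, length);
	skb_put(nskb, length);
	return nskb;		/* original buffer can be reused */
}
#endif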
 196
 197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 198						pci_channel_state_t state);
 199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 200static void e1000_io_resume(struct pci_dev *pdev);
 201
 202static const struct pci_error_handlers e1000_err_handler = {
 203	.error_detected = e1000_io_error_detected,
 204	.slot_reset = e1000_io_slot_reset,
 205	.resume = e1000_io_resume,
 206};
 207
 208static struct pci_driver e1000_driver = {
 209	.name     = e1000_driver_name,
 210	.id_table = e1000_pci_tbl,
 211	.probe    = e1000_probe,
 212	.remove   = e1000_remove,
 213#ifdef CONFIG_PM
 214	/* Power Management Hooks */
 215	.suspend  = e1000_suspend,
 216	.resume   = e1000_resume,
 217#endif
 218	.shutdown = e1000_shutdown,
 219	.err_handler = &e1000_err_handler
 220};
 221
 222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 224MODULE_LICENSE("GPL");
 225MODULE_VERSION(DRV_VERSION);
 226
 227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 228static int debug = -1;
 229module_param(debug, int, 0);
 230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 231
 232/**
 233 * e1000_get_hw_dev - return device
 234 * used by hardware layer to print debugging information
 235 *
 236 **/
 237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 238{
 239	struct e1000_adapter *adapter = hw->back;
 240	return adapter->netdev;
 241}
 242
 243/**
 244 * e1000_init_module - Driver Registration Routine
 245 *
 246 * e1000_init_module is the first routine called when the driver is
 247 * loaded. All it does is register with the PCI subsystem.
 248 **/
 249static int __init e1000_init_module(void)
 250{
 251	int ret;
 252	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 253
 254	pr_info("%s\n", e1000_copyright);
 255
 256	ret = pci_register_driver(&e1000_driver);
 257	if (copybreak != COPYBREAK_DEFAULT) {
 258		if (copybreak == 0)
 259			pr_info("copybreak disabled\n");
 260		else
 261			pr_info("copybreak enabled for "
 262				   "packets <= %u bytes\n", copybreak);
 263	}
 264	return ret;
 265}
 266
 267module_init(e1000_init_module);
 268
 269/**
 270 * e1000_exit_module - Driver Exit Cleanup Routine
 271 *
 272 * e1000_exit_module is called just before the driver is removed
 273 * from memory.
 274 **/
 275static void __exit e1000_exit_module(void)
 276{
 277	pci_unregister_driver(&e1000_driver);
 278}
 279
 280module_exit(e1000_exit_module);
 281
 282static int e1000_request_irq(struct e1000_adapter *adapter)
 283{
 284	struct net_device *netdev = adapter->netdev;
 285	irq_handler_t handler = e1000_intr;
 286	int irq_flags = IRQF_SHARED;
 287	int err;
 288
 289	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 290			  netdev);
 291	if (err) {
 292		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 293	}
 294
 295	return err;
 296}
 297
 298static void e1000_free_irq(struct e1000_adapter *adapter)
 299{
 300	struct net_device *netdev = adapter->netdev;
 301
 302	free_irq(adapter->pdev->irq, netdev);
 303}
 304
 305/**
 306 * e1000_irq_disable - Mask off interrupt generation on the NIC
 307 * @adapter: board private structure
 308 **/
 309static void e1000_irq_disable(struct e1000_adapter *adapter)
 310{
 311	struct e1000_hw *hw = &adapter->hw;
 312
 313	ew32(IMC, ~0);
 314	E1000_WRITE_FLUSH();
 315	synchronize_irq(adapter->pdev->irq);
 316}
 317
 318/**
 319 * e1000_irq_enable - Enable default interrupt generation settings
 320 * @adapter: board private structure
 321 **/
 322static void e1000_irq_enable(struct e1000_adapter *adapter)
 323{
 324	struct e1000_hw *hw = &adapter->hw;
 325
 326	ew32(IMS, IMS_ENABLE_MASK);
 327	E1000_WRITE_FLUSH();
 328}
 329
 330static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 331{
 332	struct e1000_hw *hw = &adapter->hw;
 333	struct net_device *netdev = adapter->netdev;
 334	u16 vid = hw->mng_cookie.vlan_id;
 335	u16 old_vid = adapter->mng_vlan_id;
 336
 337	if (!e1000_vlan_used(adapter))
 338		return;
 339
 340	if (!test_bit(vid, adapter->active_vlans)) {
 341		if (hw->mng_cookie.status &
 342		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 343			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 344			adapter->mng_vlan_id = vid;
 345		} else {
 346			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 347		}
 348		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 349		    (vid != old_vid) &&
 350		    !test_bit(old_vid, adapter->active_vlans))
 351			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 352					       old_vid);
 353	} else {
 354		adapter->mng_vlan_id = vid;
 355	}
 356}
 357
 358static void e1000_init_manageability(struct e1000_adapter *adapter)
 359{
 360	struct e1000_hw *hw = &adapter->hw;
 361
 362	if (adapter->en_mng_pt) {
 363		u32 manc = er32(MANC);
 364
 365		/* disable hardware interception of ARP */
 366		manc &= ~(E1000_MANC_ARP_EN);
 367
 368		ew32(MANC, manc);
 369	}
 370}
 371
 372static void e1000_release_manageability(struct e1000_adapter *adapter)
 373{
 374	struct e1000_hw *hw = &adapter->hw;
 375
 376	if (adapter->en_mng_pt) {
 377		u32 manc = er32(MANC);
 378
 379		/* re-enable hardware interception of ARP */
 380		manc |= E1000_MANC_ARP_EN;
 381
 382		ew32(MANC, manc);
 383	}
 384}
 385
 386/**
 387 * e1000_configure - configure the hardware for RX and TX
 388 * @adapter: board private structure
 389 **/
 390static void e1000_configure(struct e1000_adapter *adapter)
 391{
 392	struct net_device *netdev = adapter->netdev;
 393	int i;
 394
 395	e1000_set_rx_mode(netdev);
 396
 397	e1000_restore_vlan(adapter);
 398	e1000_init_manageability(adapter);
 399
 400	e1000_configure_tx(adapter);
 401	e1000_setup_rctl(adapter);
 402	e1000_configure_rx(adapter);
 403	/* call E1000_DESC_UNUSED which always leaves
 404	 * at least 1 descriptor unused to make sure
 405	 * next_to_use != next_to_clean
 406	 */
 407	for (i = 0; i < adapter->num_rx_queues; i++) {
 408		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 409		adapter->alloc_rx_buf(adapter, ring,
 410				      E1000_DESC_UNUSED(ring));
 411	}
 412}
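/* Worked example of the invariant noted above (a sketch; the macro
 * itself lives in e1000.h): with count = 256 and an empty ring where
 * next_to_use == next_to_clean == 10, at most 255 buffers may be
 * filled. Allocating all 256 would wrap next_to_use back onto
 * next_to_clean, making a completely full ring indistinguishable from
 * a completely empty one, so one descriptor is always left unused.
 */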
 413
 414int e1000_up(struct e1000_adapter *adapter)
 415{
 416	struct e1000_hw *hw = &adapter->hw;
 417
 418	/* hardware has been reset, we need to reload some things */
 419	e1000_configure(adapter);
 420
 421	clear_bit(__E1000_DOWN, &adapter->flags);
 422
 423	napi_enable(&adapter->napi);
 424
 425	e1000_irq_enable(adapter);
 426
 427	netif_wake_queue(adapter->netdev);
 428
 429	/* fire a link change interrupt to start the watchdog */
 430	ew32(ICS, E1000_ICS_LSC);
 431	return 0;
 432}
 433
 434/**
 435 * e1000_power_up_phy - restore link in case the phy was powered down
 436 * @adapter: address of board private structure
 437 *
 438 * The phy may be powered down to save power and turn off link when the
 439 * driver is unloaded and wake on lan is not enabled (among others)
 440 * *** this routine MUST be followed by a call to e1000_reset ***
 441 **/
 442void e1000_power_up_phy(struct e1000_adapter *adapter)
 443{
 444	struct e1000_hw *hw = &adapter->hw;
 445	u16 mii_reg = 0;
 446
 447	/* Just clear the power down bit to wake the phy back up */
 448	if (hw->media_type == e1000_media_type_copper) {
 449		/* according to the manual, the phy will retain its
 450		 * settings across a power-down/up cycle
 451		 */
 452		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 453		mii_reg &= ~MII_CR_POWER_DOWN;
 454		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 455	}
 456}
 457
 458static void e1000_power_down_phy(struct e1000_adapter *adapter)
 459{
 460	struct e1000_hw *hw = &adapter->hw;
 461
 462	/* Power down the PHY so no link is implied when interface is down.
 463	 * The PHY cannot be powered down if any of the following is true:
 464	 * (a) WoL is enabled
 465	 * (b) AMT is active
 466	 * (c) SoL/IDER session is active
 467	 */
 468	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 469	   hw->media_type == e1000_media_type_copper) {
 470		u16 mii_reg = 0;
 471
 472		switch (hw->mac_type) {
 473		case e1000_82540:
 474		case e1000_82545:
 475		case e1000_82545_rev_3:
 476		case e1000_82546:
 477		case e1000_ce4100:
 478		case e1000_82546_rev_3:
 479		case e1000_82541:
 480		case e1000_82541_rev_2:
 481		case e1000_82547:
 482		case e1000_82547_rev_2:
 483			if (er32(MANC) & E1000_MANC_SMBUS_EN)
 484				goto out;
 485			break;
 486		default:
 487			goto out;
 488		}
 489		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 490		mii_reg |= MII_CR_POWER_DOWN;
 491		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 492		msleep(1);
 493	}
 494out:
 495	return;
 496}
 497
 498static void e1000_down_and_stop(struct e1000_adapter *adapter)
 499{
 500	set_bit(__E1000_DOWN, &adapter->flags);
 501
 502	cancel_delayed_work_sync(&adapter->watchdog_task);
 503
 504	/*
 505	 * Since the watchdog task can reschedule other tasks, we should cancel
 506	 * it first; otherwise we can run into a situation where a work item is
 507	 * still running after the adapter has been turned down.
 508	 */
 509
 510	cancel_delayed_work_sync(&adapter->phy_info_task);
 511	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 512
 513	/* Only kill reset task if adapter is not resetting */
 514	if (!test_bit(__E1000_RESETTING, &adapter->flags))
 515		cancel_work_sync(&adapter->reset_task);
 516}
 517
 518void e1000_down(struct e1000_adapter *adapter)
 519{
 520	struct e1000_hw *hw = &adapter->hw;
 521	struct net_device *netdev = adapter->netdev;
 522	u32 rctl, tctl;
 523
 524	netif_carrier_off(netdev);
 525
 526	/* disable receives in the hardware */
 527	rctl = er32(RCTL);
 528	ew32(RCTL, rctl & ~E1000_RCTL_EN);
 529	/* flush and sleep below */
 530
 531	netif_tx_disable(netdev);
 532
 533	/* disable transmits in the hardware */
 534	tctl = er32(TCTL);
 535	tctl &= ~E1000_TCTL_EN;
 536	ew32(TCTL, tctl);
 537	/* flush both disables and wait for them to finish */
 538	E1000_WRITE_FLUSH();
 539	msleep(10);
 540
 541	napi_disable(&adapter->napi);
 542
 543	e1000_irq_disable(adapter);
 544
 545	/* Setting DOWN must be after irq_disable to prevent
 546	 * a screaming interrupt.  Setting DOWN also prevents
 547	 * tasks from rescheduling.
 548	 */
 549	e1000_down_and_stop(adapter);
 550
 551	adapter->link_speed = 0;
 552	adapter->link_duplex = 0;
 553
 554	e1000_reset(adapter);
 555	e1000_clean_all_tx_rings(adapter);
 556	e1000_clean_all_rx_rings(adapter);
 557}
 558
 559void e1000_reinit_locked(struct e1000_adapter *adapter)
 560{
 561	WARN_ON(in_interrupt());
 562	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 563		msleep(1);
 564	e1000_down(adapter);
 565	e1000_up(adapter);
 566	clear_bit(__E1000_RESETTING, &adapter->flags);
 567}
 568
 569void e1000_reset(struct e1000_adapter *adapter)
 570{
 571	struct e1000_hw *hw = &adapter->hw;
 572	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 573	bool legacy_pba_adjust = false;
 574	u16 hwm;
 575
 576	/* Repartition Pba for greater than 9k mtu
 577	 * To take effect CTRL.RST is required.
 578	 */
 579
 580	switch (hw->mac_type) {
 581	case e1000_82542_rev2_0:
 582	case e1000_82542_rev2_1:
 583	case e1000_82543:
 584	case e1000_82544:
 585	case e1000_82540:
 586	case e1000_82541:
 587	case e1000_82541_rev_2:
 588		legacy_pba_adjust = true;
 589		pba = E1000_PBA_48K;
 590		break;
 591	case e1000_82545:
 592	case e1000_82545_rev_3:
 593	case e1000_82546:
 594	case e1000_ce4100:
 595	case e1000_82546_rev_3:
 596		pba = E1000_PBA_48K;
 597		break;
 598	case e1000_82547:
 599	case e1000_82547_rev_2:
 600		legacy_pba_adjust = true;
 601		pba = E1000_PBA_30K;
 602		break;
 603	case e1000_undefined:
 604	case e1000_num_macs:
 605		break;
 606	}
 607
 608	if (legacy_pba_adjust) {
 609		if (hw->max_frame_size > E1000_RXBUFFER_8192)
 610			pba -= 8; /* allocate more FIFO for Tx */
 611
 612		if (hw->mac_type == e1000_82547) {
 613			adapter->tx_fifo_head = 0;
 614			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 615			adapter->tx_fifo_size =
 616				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 617			atomic_set(&adapter->tx_fifo_stall, 0);
 618		}
 619	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
 620		/* adjust PBA for jumbo frames */
 621		ew32(PBA, pba);
 622
 623		/* To maintain wire speed transmits, the Tx FIFO should be
 624		 * large enough to accommodate two full transmit packets,
 625		 * rounded up to the next 1KB and expressed in KB.  Likewise,
 626		 * the Rx FIFO should be large enough to accommodate at least
 627		 * one full receive packet and is similarly rounded up and
 628		 * expressed in KB.
 629		 */
 630		pba = er32(PBA);
 631		/* upper 16 bits has Tx packet buffer allocation size in KB */
 632		tx_space = pba >> 16;
 633		/* lower 16 bits has Rx packet buffer allocation size in KB */
 634		pba &= 0xffff;
 635		/* the Tx fifo also stores 16 bytes of information about the Tx
 636		 * but don't include ethernet FCS because hardware appends it
 637		 */
 638		min_tx_space = (hw->max_frame_size +
 639				sizeof(struct e1000_tx_desc) -
 640				ETH_FCS_LEN) * 2;
 641		min_tx_space = ALIGN(min_tx_space, 1024);
 642		min_tx_space >>= 10;
 643		/* software strips receive CRC, so leave room for it */
 644		min_rx_space = hw->max_frame_size;
 645		min_rx_space = ALIGN(min_rx_space, 1024);
 646		min_rx_space >>= 10;
 647
 648		/* If current Tx allocation is less than the min Tx FIFO size,
 649		 * and the min Tx FIFO size is less than the current Rx FIFO
 650		 * allocation, take space away from current Rx allocation
 651		 */
 652		if (tx_space < min_tx_space &&
 653		    ((min_tx_space - tx_space) < pba)) {
 654			pba = pba - (min_tx_space - tx_space);
 655
 656			/* PCI/PCIx hardware has PBA alignment constraints */
 657			switch (hw->mac_type) {
 658			case e1000_82545 ... e1000_82546_rev_3:
 659				pba &= ~(E1000_PBA_8K - 1);
 660				break;
 661			default:
 662				break;
 663			}
 664
 665			/* if short on Rx space, Rx wins and must trump Tx
 666			 * adjustment or use Early Receive if available
 667			 */
 668			if (pba < min_rx_space)
 669				pba = min_rx_space;
 670		}
 671	}
 672
 673	ew32(PBA, pba);
 674
 675	/* flow control settings:
 676	 * The high water mark must be low enough to fit one full frame
 677	 * (or the size used for early receive) above it in the Rx FIFO.
 678	 * Set it to the lower of:
 679	 * - 90% of the Rx FIFO size, and
 680	 * - the full Rx FIFO size minus the early receive size (for parts
 681	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
 682	 * - the full Rx FIFO size minus one full frame
 683	 */
 684	hwm = min(((pba << 10) * 9 / 10),
 685		  ((pba << 10) - hw->max_frame_size));
 686
 687	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
 688	hw->fc_low_water = hw->fc_high_water - 8;
 689	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 690	hw->fc_send_xon = 1;
 691	hw->fc = hw->original_fc;
 692
 693	/* Allow time for pending master requests to run */
 694	e1000_reset_hw(hw);
 695	if (hw->mac_type >= e1000_82544)
 696		ew32(WUC, 0);
 697
 698	if (e1000_init_hw(hw))
 699		e_dev_err("Hardware Error\n");
 700	e1000_update_mng_vlan(adapter);
 701
 702	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 703	if (hw->mac_type >= e1000_82544 &&
 704	    hw->autoneg == 1 &&
 705	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 706		u32 ctrl = er32(CTRL);
 707		/* clear phy power management bit if we are in gig only mode,
 708		 * which if enabled will attempt negotiation to 100Mb, which
 709		 * can cause a loss of link at power off or driver unload
 710		 */
 711		ctrl &= ~E1000_CTRL_SWDPIN3;
 712		ew32(CTRL, ctrl);
 713	}
 714
 715	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 716	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 717
 718	e1000_reset_adaptive(hw);
 719	e1000_phy_get_info(hw, &adapter->phy_info);
 720
 721	e1000_release_manageability(adapter);
 722}
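/* The FIFO and watermark arithmetic above, worked with assumed values
 * (a sketch): take a 48 KB packet buffer split as tx_space = 16 KB and
 * pba = 32 KB, max_frame_size = 9018, and no PCI/PCI-X PBA alignment
 * constraint. Then, with a 16-byte legacy Tx descriptor:
 *
 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 (KB)
 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 (KB)
 *
 * tx_space (16) < min_tx_space (18), so 2 KB moves from Rx to Tx and
 * pba becomes 30 KB, still above min_rx_space. The watermarks follow:
 *
 *   hwm = min(30720 * 9 / 10, 30720 - 9018) = 21702
 *   fc_high_water = 21702 & 0xFFF8 = 21696
 *   fc_low_water  = 21696 - 8 = 21688
 */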
 723
 724/* Dump the eeprom for users having checksum issues */
 725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 726{
 727	struct net_device *netdev = adapter->netdev;
 728	struct ethtool_eeprom eeprom;
 729	const struct ethtool_ops *ops = netdev->ethtool_ops;
 730	u8 *data;
 731	int i;
 732	u16 csum_old, csum_new = 0;
 733
 734	eeprom.len = ops->get_eeprom_len(netdev);
 735	eeprom.offset = 0;
 736
 737	data = kmalloc(eeprom.len, GFP_KERNEL);
 738	if (!data)
 739		return;
 740
 741	ops->get_eeprom(netdev, &eeprom, data);
 742
 743	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 744		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 745	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 746		csum_new += data[i] + (data[i + 1] << 8);
 747	csum_new = EEPROM_SUM - csum_new;
 748
 749	pr_err("/*********************/\n");
 750	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 751	pr_err("Calculated              : 0x%04x\n", csum_new);
 752
 753	pr_err("Offset    Values\n");
 754	pr_err("========  ======\n");
 755	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 756
 757	pr_err("Include this output when contacting your support provider.\n");
 758	pr_err("This is not a software error! Something bad happened to\n");
 759	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 760	pr_err("result in further problems, possibly loss of data,\n");
 761	pr_err("corruption or system hangs!\n");
 762	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 763	pr_err("which is invalid and requires you to set the proper MAC\n");
 764	pr_err("address manually before continuing to enable this network\n");
 765	pr_err("device. Please inspect the EEPROM dump and report the\n");
 766	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 767	pr_err("/*********************/\n");
 768
 769	kfree(data);
 770}
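/* The checksum rule behind csum_old/csum_new (sketch): the 16-bit
 * little-endian words from offset 0 through EEPROM_CHECKSUM_REG must
 * sum to EEPROM_SUM (0xBABA) modulo 0x10000, so the expected checksum
 * word is
 *
 *   csum = 0xBABA - (word[0] + word[1] + ... + word[CHECKSUM_REG - 1])
 *
 * which is exactly what the loop above computes from the byte dump
 * (hence data[i] + (data[i + 1] << 8) for each word).
 */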
 771
 772/**
 773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 774 * @pdev: PCI device information struct
 775 *
 776 * Return true if an adapter needs ioport resources
 777 **/
 778static int e1000_is_need_ioport(struct pci_dev *pdev)
 779{
 780	switch (pdev->device) {
 781	case E1000_DEV_ID_82540EM:
 782	case E1000_DEV_ID_82540EM_LOM:
 783	case E1000_DEV_ID_82540EP:
 784	case E1000_DEV_ID_82540EP_LOM:
 785	case E1000_DEV_ID_82540EP_LP:
 786	case E1000_DEV_ID_82541EI:
 787	case E1000_DEV_ID_82541EI_MOBILE:
 788	case E1000_DEV_ID_82541ER:
 789	case E1000_DEV_ID_82541ER_LOM:
 790	case E1000_DEV_ID_82541GI:
 791	case E1000_DEV_ID_82541GI_LF:
 792	case E1000_DEV_ID_82541GI_MOBILE:
 793	case E1000_DEV_ID_82544EI_COPPER:
 794	case E1000_DEV_ID_82544EI_FIBER:
 795	case E1000_DEV_ID_82544GC_COPPER:
 796	case E1000_DEV_ID_82544GC_LOM:
 797	case E1000_DEV_ID_82545EM_COPPER:
 798	case E1000_DEV_ID_82545EM_FIBER:
 799	case E1000_DEV_ID_82546EB_COPPER:
 800	case E1000_DEV_ID_82546EB_FIBER:
 801	case E1000_DEV_ID_82546EB_QUAD_COPPER:
 802		return true;
 803	default:
 804		return false;
 805	}
 806}
 807
 808static netdev_features_t e1000_fix_features(struct net_device *netdev,
 809	netdev_features_t features)
 810{
 811	/* Since there is no support for separate Rx/Tx vlan accel
 812	 * enable/disable make sure Tx flag is always in same state as Rx.
 813	 */
 814	if (features & NETIF_F_HW_VLAN_CTAG_RX)
 815		features |= NETIF_F_HW_VLAN_CTAG_TX;
 816	else
 817		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 818
 819	return features;
 820}
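/* Example of the coupling enforced above: a request such as
 * `ethtool -K eth0 rx-vlan-offload off` clears NETIF_F_HW_VLAN_CTAG_RX,
 * and this hook then clears NETIF_F_HW_VLAN_CTAG_TX too, so Rx and Tx
 * VLAN acceleration are never advertised in different states on this
 * hardware.
 */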
 821
 822static int e1000_set_features(struct net_device *netdev,
 823	netdev_features_t features)
 824{
 825	struct e1000_adapter *adapter = netdev_priv(netdev);
 826	netdev_features_t changed = features ^ netdev->features;
 827
 828	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 829		e1000_vlan_mode(netdev, features);
 830
 831	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 832		return 0;
 833
 834	netdev->features = features;
 835	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 836
 837	if (netif_running(netdev))
 838		e1000_reinit_locked(adapter);
 839	else
 840		e1000_reset(adapter);
 841
 842	return 0;
 843}
 844
 845static const struct net_device_ops e1000_netdev_ops = {
 846	.ndo_open		= e1000_open,
 847	.ndo_stop		= e1000_close,
 848	.ndo_start_xmit		= e1000_xmit_frame,
 849	.ndo_get_stats		= e1000_get_stats,
 850	.ndo_set_rx_mode	= e1000_set_rx_mode,
 851	.ndo_set_mac_address	= e1000_set_mac,
 852	.ndo_tx_timeout		= e1000_tx_timeout,
 853	.ndo_change_mtu		= e1000_change_mtu,
 854	.ndo_do_ioctl		= e1000_ioctl,
 855	.ndo_validate_addr	= eth_validate_addr,
 856	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
 857	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
 858#ifdef CONFIG_NET_POLL_CONTROLLER
 859	.ndo_poll_controller	= e1000_netpoll,
 860#endif
 861	.ndo_fix_features	= e1000_fix_features,
 862	.ndo_set_features	= e1000_set_features,
 863};
 864
 865/**
 866 * e1000_init_hw_struct - initialize members of hw struct
 867 * @adapter: board private struct
 868 * @hw: structure used by e1000_hw.c
 869 *
 870 * Factors out initialization of the e1000_hw struct to its own function
 871 * that can be called very early at init (just after struct allocation).
 872 * Fields are initialized based on PCI device information and
 873 * OS network device settings (MTU size).
 874 * Returns negative error codes if MAC type setup fails.
 875 */
 876static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 877				struct e1000_hw *hw)
 878{
 879	struct pci_dev *pdev = adapter->pdev;
 880
 881	/* PCI config space info */
 882	hw->vendor_id = pdev->vendor;
 883	hw->device_id = pdev->device;
 884	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 885	hw->subsystem_id = pdev->subsystem_device;
 886	hw->revision_id = pdev->revision;
 887
 888	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 889
 890	hw->max_frame_size = adapter->netdev->mtu +
 891			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 892	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 893
 894	/* identify the MAC */
 895	if (e1000_set_mac_type(hw)) {
 896		e_err(probe, "Unknown MAC Type\n");
 897		return -EIO;
 898	}
 899
 900	switch (hw->mac_type) {
 901	default:
 902		break;
 903	case e1000_82541:
 904	case e1000_82547:
 905	case e1000_82541_rev_2:
 906	case e1000_82547_rev_2:
 907		hw->phy_init_script = 1;
 908		break;
 909	}
 910
 911	e1000_set_media_type(hw);
 912	e1000_get_bus_info(hw);
 913
 914	hw->wait_autoneg_complete = false;
 915	hw->tbi_compatibility_en = true;
 916	hw->adaptive_ifs = true;
 917
 918	/* Copper options */
 919
 920	if (hw->media_type == e1000_media_type_copper) {
 921		hw->mdix = AUTO_ALL_MODES;
 922		hw->disable_polarity_correction = false;
 923		hw->master_slave = E1000_MASTER_SLAVE;
 924	}
 925
 926	return 0;
 927}
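/* The frame-size arithmetic above, worked for the default MTU: with
 * mtu = 1500, ENET_HEADER_SIZE = 14 and ETHERNET_FCS_SIZE = 4,
 * max_frame_size = 1500 + 14 + 4 = 1518, the classic maximum untagged
 * Ethernet frame on the wire.
 */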
 928
 929/**
 930 * e1000_probe - Device Initialization Routine
 931 * @pdev: PCI device information struct
 932 * @ent: entry in e1000_pci_tbl
 933 *
 934 * Returns 0 on success, negative on failure
 935 *
 936 * e1000_probe initializes an adapter identified by a pci_dev structure.
 937 * The OS initialization, configuring of the adapter private structure,
 938 * and a hardware reset occur.
 939 **/
 940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 941{
 942	struct net_device *netdev;
 943	struct e1000_adapter *adapter;
 944	struct e1000_hw *hw;
 945
 946	static int cards_found;
 947	static int global_quad_port_a; /* global ksp3 port a indication */
 948	int i, err, pci_using_dac;
 949	u16 eeprom_data = 0;
 950	u16 tmp = 0;
 951	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 952	int bars, need_ioport;
 953
 954	/* do not allocate ioport bars when not needed */
 955	need_ioport = e1000_is_need_ioport(pdev);
 956	if (need_ioport) {
 957		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 958		err = pci_enable_device(pdev);
 959	} else {
 960		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 961		err = pci_enable_device_mem(pdev);
 962	}
 963	if (err)
 964		return err;
 965
 966	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 967	if (err)
 968		goto err_pci_reg;
 969
 970	pci_set_master(pdev);
 971	err = pci_save_state(pdev);
 972	if (err)
 973		goto err_alloc_etherdev;
 974
 975	err = -ENOMEM;
 976	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 977	if (!netdev)
 978		goto err_alloc_etherdev;
 979
 980	SET_NETDEV_DEV(netdev, &pdev->dev);
 981
 982	pci_set_drvdata(pdev, netdev);
 983	adapter = netdev_priv(netdev);
 984	adapter->netdev = netdev;
 985	adapter->pdev = pdev;
 986	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 987	adapter->bars = bars;
 988	adapter->need_ioport = need_ioport;
 989
 990	hw = &adapter->hw;
 991	hw->back = adapter;
 992
 993	err = -EIO;
 994	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 995	if (!hw->hw_addr)
 996		goto err_ioremap;
 997
 998	if (adapter->need_ioport) {
 999		for (i = BAR_1; i <= BAR_5; i++) {
1000			if (pci_resource_len(pdev, i) == 0)
1001				continue;
1002			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1003				hw->io_base = pci_resource_start(pdev, i);
1004				break;
1005			}
1006		}
1007	}
1008
1009	/* make ready for any if (hw->...) below */
1010	err = e1000_init_hw_struct(adapter, hw);
1011	if (err)
1012		goto err_sw_init;
1013
1014	/* there is a workaround being applied below that limits
1015	 * 64-bit DMA addresses to 64-bit hardware.  There are some
1016	 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1017	 */
1018	pci_using_dac = 0;
1019	if ((hw->bus_type == e1000_bus_type_pcix) &&
1020	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1021		pci_using_dac = 1;
1022	} else {
1023		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1024		if (err) {
1025			pr_err("No usable DMA config, aborting\n");
1026			goto err_dma;
1027		}
1028	}
1029
1030	netdev->netdev_ops = &e1000_netdev_ops;
1031	e1000_set_ethtool_ops(netdev);
1032	netdev->watchdog_timeo = 5 * HZ;
1033	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1034
1035	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1036
1037	adapter->bd_number = cards_found;
1038
1039	/* setup the private structure */
1040
1041	err = e1000_sw_init(adapter);
1042	if (err)
1043		goto err_sw_init;
1044
1045	err = -EIO;
1046	if (hw->mac_type == e1000_ce4100) {
1047		hw->ce4100_gbe_mdio_base_virt =
1048					ioremap(pci_resource_start(pdev, BAR_1),
1049						pci_resource_len(pdev, BAR_1));
1050
1051		if (!hw->ce4100_gbe_mdio_base_virt)
1052			goto err_mdio_ioremap;
1053	}
1054
1055	if (hw->mac_type >= e1000_82543) {
1056		netdev->hw_features = NETIF_F_SG |
1057				   NETIF_F_HW_CSUM |
1058				   NETIF_F_HW_VLAN_CTAG_RX;
1059		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1060				   NETIF_F_HW_VLAN_CTAG_FILTER;
1061	}
1062
1063	if ((hw->mac_type >= e1000_82544) &&
1064	   (hw->mac_type != e1000_82547))
1065		netdev->hw_features |= NETIF_F_TSO;
1066
1067	netdev->priv_flags |= IFF_SUPP_NOFCS;
1068
1069	netdev->features |= netdev->hw_features;
1070	netdev->hw_features |= (NETIF_F_RXCSUM |
1071				NETIF_F_RXALL |
1072				NETIF_F_RXFCS);
1073
1074	if (pci_using_dac) {
1075		netdev->features |= NETIF_F_HIGHDMA;
1076		netdev->vlan_features |= NETIF_F_HIGHDMA;
1077	}
1078
1079	netdev->vlan_features |= (NETIF_F_TSO |
1080				  NETIF_F_HW_CSUM |
1081				  NETIF_F_SG);
1082
1083	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1084	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1085	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1086		netdev->priv_flags |= IFF_UNICAST_FLT;
1087
1088	/* MTU range: 46 - 16110 */
1089	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1090	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
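	/* Worked out: ETH_ZLEN (60) - ETH_HLEN (14) = 46, and
	 * MAX_JUMBO_FRAME_SIZE (0x3F00 = 16128) - 18 = 16110,
	 * matching the range noted above.
	 */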
1091
1092	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1093
1094	/* initialize eeprom parameters */
1095	if (e1000_init_eeprom_params(hw)) {
1096		e_err(probe, "EEPROM initialization failed\n");
1097		goto err_eeprom;
1098	}
1099
1100	/* before reading the EEPROM, reset the controller to
1101	 * put the device in a known good starting state
1102	 */
1103
1104	e1000_reset_hw(hw);
1105
1106	/* make sure the EEPROM is good */
1107	if (e1000_validate_eeprom_checksum(hw) < 0) {
1108		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1109		e1000_dump_eeprom(adapter);
1110		/* set MAC address to all zeroes to invalidate and temporarily
1111		 * disable this device for the user. This blocks regular
1112		 * traffic while still permitting ethtool ioctls from reaching
1113		 * the hardware as well as allowing the user to run the
1114		 * interface after manually setting a hw addr using
1115		 * `ip set address`
1116		 */
1117		memset(hw->mac_addr, 0, netdev->addr_len);
1118	} else {
1119		/* copy the MAC address out of the EEPROM */
1120		if (e1000_read_mac_addr(hw))
1121			e_err(probe, "EEPROM Read Error\n");
1122	}
1123	/* don't block initialization here due to bad MAC address */
1124	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1125
1126	if (!is_valid_ether_addr(netdev->dev_addr))
1127		e_err(probe, "Invalid MAC Address\n");
1128
1129
1130	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1131	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1132			  e1000_82547_tx_fifo_stall_task);
1133	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1134	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1135
1136	e1000_check_options(adapter);
1137
1138	/* Initial Wake on LAN setting
1139	 * If APM wake is enabled in the EEPROM,
1140	 * enable the ACPI Magic Packet filter
1141	 */
1142
1143	switch (hw->mac_type) {
1144	case e1000_82542_rev2_0:
1145	case e1000_82542_rev2_1:
1146	case e1000_82543:
1147		break;
1148	case e1000_82544:
1149		e1000_read_eeprom(hw,
1150			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1151		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1152		break;
1153	case e1000_82546:
1154	case e1000_82546_rev_3:
1155		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1156			e1000_read_eeprom(hw,
1157				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1158			break;
1159		}
1160		/* Fall Through */
1161	default:
1162		e1000_read_eeprom(hw,
1163			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1164		break;
1165	}
1166	if (eeprom_data & eeprom_apme_mask)
1167		adapter->eeprom_wol |= E1000_WUFC_MAG;
1168
1169	/* now that we have the eeprom settings, apply the special cases
1170	 * where the eeprom may be wrong or the board simply won't support
1171	 * wake on lan on a particular port
1172	 */
1173	switch (pdev->device) {
1174	case E1000_DEV_ID_82546GB_PCIE:
1175		adapter->eeprom_wol = 0;
1176		break;
1177	case E1000_DEV_ID_82546EB_FIBER:
1178	case E1000_DEV_ID_82546GB_FIBER:
1179		/* Wake events only supported on port A for dual fiber
1180		 * regardless of eeprom setting
1181		 */
1182		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1183			adapter->eeprom_wol = 0;
1184		break;
1185	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1186		/* if quad port adapter, disable WoL on all but port A */
1187		if (global_quad_port_a != 0)
1188			adapter->eeprom_wol = 0;
1189		else
1190			adapter->quad_port_a = true;
1191		/* Reset for multiple quad port adapters */
1192		if (++global_quad_port_a == 4)
1193			global_quad_port_a = 0;
1194		break;
1195	}
1196
1197	/* initialize the wol settings based on the eeprom settings */
1198	adapter->wol = adapter->eeprom_wol;
1199	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1200
1201	/* Auto detect PHY address */
1202	if (hw->mac_type == e1000_ce4100) {
1203		for (i = 0; i < 32; i++) {
1204			hw->phy_addr = i;
1205			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1206
1207			if (tmp != 0 && tmp != 0xFF)
1208				break;
1209		}
1210
1211		if (i >= 32)
1212			goto err_eeprom;
1213	}
1214
1215	/* reset the hardware with the new settings */
1216	e1000_reset(adapter);
1217
1218	strcpy(netdev->name, "eth%d");
1219	err = register_netdev(netdev);
1220	if (err)
1221		goto err_register;
1222
1223	e1000_vlan_filter_on_off(adapter, false);
1224
1225	/* print bus type/speed/width info */
1226	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1227	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1228	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1229		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1230		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1231		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1232	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1233	       netdev->dev_addr);
1234
1235	/* carrier off reporting is important to ethtool even BEFORE open */
1236	netif_carrier_off(netdev);
1237
1238	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1239
1240	cards_found++;
1241	return 0;
1242
1243err_register:
1244err_eeprom:
1245	e1000_phy_hw_reset(hw);
1246
1247	if (hw->flash_address)
1248		iounmap(hw->flash_address);
1249	kfree(adapter->tx_ring);
1250	kfree(adapter->rx_ring);
1251err_dma:
1252err_sw_init:
1253err_mdio_ioremap:
1254	iounmap(hw->ce4100_gbe_mdio_base_virt);
1255	iounmap(hw->hw_addr);
1256err_ioremap:
1257	free_netdev(netdev);
1258err_alloc_etherdev:
1259	pci_release_selected_regions(pdev, bars);
1260err_pci_reg:
1261	pci_disable_device(pdev);
1262	return err;
1263}
1264
1265/**
1266 * e1000_remove - Device Removal Routine
1267 * @pdev: PCI device information struct
1268 *
1269 * e1000_remove is called by the PCI subsystem to alert the driver
1270 * that it should release a PCI device. That could be caused by a
1271 * Hot-Plug event, or because the driver is going to be removed from
1272 * memory.
1273 **/
1274static void e1000_remove(struct pci_dev *pdev)
1275{
1276	struct net_device *netdev = pci_get_drvdata(pdev);
1277	struct e1000_adapter *adapter = netdev_priv(netdev);
1278	struct e1000_hw *hw = &adapter->hw;
1279
1280	e1000_down_and_stop(adapter);
1281	e1000_release_manageability(adapter);
1282
1283	unregister_netdev(netdev);
1284
1285	e1000_phy_hw_reset(hw);
1286
1287	kfree(adapter->tx_ring);
1288	kfree(adapter->rx_ring);
1289
1290	if (hw->mac_type == e1000_ce4100)
1291		iounmap(hw->ce4100_gbe_mdio_base_virt);
1292	iounmap(hw->hw_addr);
1293	if (hw->flash_address)
1294		iounmap(hw->flash_address);
1295	pci_release_selected_regions(pdev, adapter->bars);
1296
1297	free_netdev(netdev);
1298
1299	pci_disable_device(pdev);
1300}
1301
1302/**
1303 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1304 * @adapter: board private structure to initialize
1305 *
1306 * e1000_sw_init initializes the Adapter private data structure.
1307 * e1000_init_hw_struct MUST be called before this function
1308 **/
1309static int e1000_sw_init(struct e1000_adapter *adapter)
1310{
1311	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1312
1313	adapter->num_tx_queues = 1;
1314	adapter->num_rx_queues = 1;
1315
1316	if (e1000_alloc_queues(adapter)) {
1317		e_err(probe, "Unable to allocate memory for queues\n");
1318		return -ENOMEM;
1319	}
1320
1321	/* Explicitly disable IRQ since the NIC can be in any state. */
1322	e1000_irq_disable(adapter);
1323
1324	spin_lock_init(&adapter->stats_lock);
1325
1326	set_bit(__E1000_DOWN, &adapter->flags);
1327
1328	return 0;
1329}
1330
1331/**
1332 * e1000_alloc_queues - Allocate memory for all rings
1333 * @adapter: board private structure to initialize
1334 *
1335 * We allocate one ring per queue at run-time since we don't know the
1336 * number of queues at compile-time.
1337 **/
1338static int e1000_alloc_queues(struct e1000_adapter *adapter)
1339{
1340	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1341				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
1342	if (!adapter->tx_ring)
1343		return -ENOMEM;
1344
1345	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1346				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
1347	if (!adapter->rx_ring) {
1348		kfree(adapter->tx_ring);
1349		return -ENOMEM;
1350	}
1351
1352	return E1000_SUCCESS;
1353}
1354
1355/**
1356 * e1000_open - Called when a network interface is made active
1357 * @netdev: network interface device structure
1358 *
1359 * Returns 0 on success, negative value on failure
1360 *
1361 * The open entry point is called when a network interface is made
1362 * active by the system (IFF_UP).  At this point all resources needed
1363 * for transmit and receive operations are allocated, the interrupt
1364 * handler is registered with the OS, the watchdog task is started,
1365 * and the stack is notified that the interface is ready.
1366 **/
1367int e1000_open(struct net_device *netdev)
1368{
1369	struct e1000_adapter *adapter = netdev_priv(netdev);
1370	struct e1000_hw *hw = &adapter->hw;
1371	int err;
1372
1373	/* disallow open during test */
1374	if (test_bit(__E1000_TESTING, &adapter->flags))
1375		return -EBUSY;
1376
1377	netif_carrier_off(netdev);
1378
1379	/* allocate transmit descriptors */
1380	err = e1000_setup_all_tx_resources(adapter);
1381	if (err)
1382		goto err_setup_tx;
1383
1384	/* allocate receive descriptors */
1385	err = e1000_setup_all_rx_resources(adapter);
1386	if (err)
1387		goto err_setup_rx;
1388
1389	e1000_power_up_phy(adapter);
1390
1391	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1392	if ((hw->mng_cookie.status &
1393			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1394		e1000_update_mng_vlan(adapter);
1395	}
1396
1397	/* before we allocate an interrupt, we must be ready to handle it.
1398	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1399	 * as soon as we call pci_request_irq, so we have to set up our
1400	 * clean_rx handler before we do so.
1401	 */
1402	e1000_configure(adapter);
1403
1404	err = e1000_request_irq(adapter);
1405	if (err)
1406		goto err_req_irq;
1407
1408	/* From here on the code is the same as e1000_up() */
1409	clear_bit(__E1000_DOWN, &adapter->flags);
1410
1411	napi_enable(&adapter->napi);
1412
1413	e1000_irq_enable(adapter);
1414
1415	netif_start_queue(netdev);
1416
1417	/* fire a link status change interrupt to start the watchdog */
1418	ew32(ICS, E1000_ICS_LSC);
1419
1420	return E1000_SUCCESS;
1421
1422err_req_irq:
1423	e1000_power_down_phy(adapter);
1424	e1000_free_all_rx_resources(adapter);
1425err_setup_rx:
1426	e1000_free_all_tx_resources(adapter);
1427err_setup_tx:
1428	e1000_reset(adapter);
1429
1430	return err;
1431}
1432
1433/**
1434 * e1000_close - Disables a network interface
1435 * @netdev: network interface device structure
1436 *
1437 * Returns 0, this is not allowed to fail
1438 *
1439 * The close entry point is called when an interface is de-activated
1440 * by the OS.  The hardware is still under the drivers control, but
1441 * needs to be disabled.  A global MAC reset is issued to stop the
1442 * hardware, and all transmit and receive resources are freed.
1443 **/
1444int e1000_close(struct net_device *netdev)
1445{
1446	struct e1000_adapter *adapter = netdev_priv(netdev);
1447	struct e1000_hw *hw = &adapter->hw;
1448	int count = E1000_CHECK_RESET_COUNT;
1449
1450	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1451		usleep_range(10000, 20000);
1452
1453	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1454	e1000_down(adapter);
1455	e1000_power_down_phy(adapter);
1456	e1000_free_irq(adapter);
1457
1458	e1000_free_all_tx_resources(adapter);
1459	e1000_free_all_rx_resources(adapter);
1460
1461	/* kill manageability vlan ID if supported, but not if a vlan with
1462	 * the same ID is registered on the host OS (let 8021q kill it)
1463	 */
1464	if ((hw->mng_cookie.status &
1465	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1466	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1467		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1468				       adapter->mng_vlan_id);
1469	}
1470
1471	return 0;
1472}
1473
1474/**
1475 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1476 * @adapter: address of board private structure
1477 * @start: address of beginning of memory
1478 * @len: length of memory
1479 **/
1480static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481				  unsigned long len)
1482{
1483	struct e1000_hw *hw = &adapter->hw;
1484	unsigned long begin = (unsigned long)start;
1485	unsigned long end = begin + len;
1486
1487	/* First-rev 82545 and 82546 must not allow any memory
1488	 * write location to cross a 64k boundary due to errata 23
1489	 */
1490	if (hw->mac_type == e1000_82545 ||
1491	    hw->mac_type == e1000_ce4100 ||
1492	    hw->mac_type == e1000_82546) {
1493		return ((begin ^ (end - 1)) >> 16) == 0;
1494	}
1495
1496	return true;
1497}
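/* How the XOR test above works (worked example): two addresses share a
 * 64 KB page iff they agree in every bit above bit 15, i.e.
 * ((begin ^ (end - 1)) >> 16) == 0. For begin = 0x1FF00 and len =
 * 0x200, end - 1 = 0x200FF and 0x1FF00 ^ 0x200FF = 0x3FFFF, which
 * shifted right by 16 gives 3, so the buffer crosses a boundary and
 * the check fails. For begin = 0x20000 with the same length,
 * 0x20000 ^ 0x201FF = 0x1FF and the check passes.
 */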
1498
1499/**
1500 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1501 * @adapter: board private structure
1502 * @txdr:    tx descriptor ring (for a specific queue) to setup
1503 *
1504 * Return 0 on success, negative on failure
1505 **/
1506static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1507				    struct e1000_tx_ring *txdr)
1508{
1509	struct pci_dev *pdev = adapter->pdev;
1510	int size;
1511
1512	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1513	txdr->buffer_info = vzalloc(size);
1514	if (!txdr->buffer_info)
1515		return -ENOMEM;
1516
1517	/* round up to nearest 4K */
1518
1519	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1520	txdr->size = ALIGN(txdr->size, 4096);
1521
1522	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1523					GFP_KERNEL);
1524	if (!txdr->desc) {
1525setup_tx_desc_die:
1526		vfree(txdr->buffer_info);
1527		return -ENOMEM;
1528	}
1529
1530	/* Fix for errata 23, can't cross 64kB boundary */
1531	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1532		void *olddesc = txdr->desc;
1533		dma_addr_t olddma = txdr->dma;
1534		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1535		      txdr->size, txdr->desc);
1536		/* Try again, without freeing the previous */
1537		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1538						&txdr->dma, GFP_KERNEL);
1539		/* Failed allocation, critical failure */
1540		if (!txdr->desc) {
1541			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1542					  olddma);
1543			goto setup_tx_desc_die;
1544		}
1545
1546		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1547			/* give up */
1548			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1549					  txdr->dma);
1550			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551					  olddma);
1552			e_err(probe, "Unable to allocate aligned memory "
1553			      "for the transmit descriptor ring\n");
1554			vfree(txdr->buffer_info);
1555			return -ENOMEM;
1556		} else {
1557			/* Free old allocation, new allocation was successful */
1558			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1559					  olddma);
1560		}
1561	}
1562	memset(txdr->desc, 0, txdr->size);
1563
1564	txdr->next_to_use = 0;
1565	txdr->next_to_clean = 0;
1566
1567	return 0;
1568}
1569
1570/**
1571 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1572 * 				  (Descriptors) for all queues
1573 * @adapter: board private structure
1574 *
1575 * Return 0 on success, negative on failure
1576 **/
1577int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1578{
1579	int i, err = 0;
1580
1581	for (i = 0; i < adapter->num_tx_queues; i++) {
1582		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1583		if (err) {
1584			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1585			for (i-- ; i >= 0; i--)
1586				e1000_free_tx_resources(adapter,
1587							&adapter->tx_ring[i]);
1588			break;
1589		}
1590	}
1591
1592	return err;
1593}
1594
1595/**
1596 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1597 * @adapter: board private structure
1598 *
1599 * Configure the Tx unit of the MAC after a reset.
1600 **/
1601static void e1000_configure_tx(struct e1000_adapter *adapter)
1602{
1603	u64 tdba;
1604	struct e1000_hw *hw = &adapter->hw;
1605	u32 tdlen, tctl, tipg;
1606	u32 ipgr1, ipgr2;
1607
1608	/* Setup the HW Tx Head and Tail descriptor pointers */
1609
1610	switch (adapter->num_tx_queues) {
1611	case 1:
1612	default:
1613		tdba = adapter->tx_ring[0].dma;
1614		tdlen = adapter->tx_ring[0].count *
1615			sizeof(struct e1000_tx_desc);
1616		ew32(TDLEN, tdlen);
1617		ew32(TDBAH, (tdba >> 32));
1618		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1619		ew32(TDT, 0);
1620		ew32(TDH, 0);
1621		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1622					   E1000_TDH : E1000_82542_TDH);
1623		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1624					   E1000_TDT : E1000_82542_TDT);
1625		break;
1626	}
1627
1628	/* Set the default values for the Tx Inter Packet Gap timer */
1629	if ((hw->media_type == e1000_media_type_fiber ||
1630	     hw->media_type == e1000_media_type_internal_serdes))
1631		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1632	else
1633		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1634
1635	switch (hw->mac_type) {
1636	case e1000_82542_rev2_0:
1637	case e1000_82542_rev2_1:
1638		tipg = DEFAULT_82542_TIPG_IPGT;
1639		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1640		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1641		break;
1642	default:
1643		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1644		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1645		break;
1646	}
1647	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1648	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1649	ew32(TIPG, tipg);
1650
1651	/* Set the Tx Interrupt Delay register */
1652
1653	ew32(TIDV, adapter->tx_int_delay);
1654	if (hw->mac_type >= e1000_82540)
1655		ew32(TADV, adapter->tx_abs_int_delay);
1656
1657	/* Program the Transmit Control Register */
1658
1659	tctl = er32(TCTL);
1660	tctl &= ~E1000_TCTL_CT;
1661	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1662		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1663
1664	e1000_config_collision_dist(hw);
1665
1666	/* Setup Transmit Descriptor Settings for eop descriptor */
1667	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1668
1669	/* only set IDE if we are delaying interrupts using the timers */
1670	if (adapter->tx_int_delay)
1671		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1672
1673	if (hw->mac_type < e1000_82543)
1674		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1675	else
1676		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1677
1678	/* Cache if we're 82544 running in PCI-X because we'll
1679	 * need this to apply a workaround later in the send path.
1680	 */
1681	if (hw->mac_type == e1000_82544 &&
1682	    hw->bus_type == e1000_bus_type_pcix)
1683		adapter->pcix_82544 = true;
1684
1685	ew32(TCTL, tctl);
1686
1687}
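/* TIPG packing illustrated (a sketch; field layout per the 8254x
 * documentation): IPGT sits in bits 9:0, IPGR1 in bits 19:10 and IPGR2
 * in bits 29:20. With the 82543 copper defaults (IPGT = 8, IPGR1 = 8,
 * IPGR2 = 6) the register value written above is
 *
 *   8 | (8 << 10) | (6 << 20) = 0x00602008
 */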
1688
1689/**
1690 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1691 * @adapter: board private structure
1692 * @rxdr:    rx descriptor ring (for a specific queue) to setup
1693 *
1694 * Returns 0 on success, negative on failure
1695 **/
1696static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1697				    struct e1000_rx_ring *rxdr)
1698{
1699	struct pci_dev *pdev = adapter->pdev;
1700	int size, desc_len;
1701
1702	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1703	rxdr->buffer_info = vzalloc(size);
1704	if (!rxdr->buffer_info)
1705		return -ENOMEM;
1706
1707	desc_len = sizeof(struct e1000_rx_desc);
1708
1709	/* Round up to nearest 4K */
1710
1711	rxdr->size = rxdr->count * desc_len;
1712	rxdr->size = ALIGN(rxdr->size, 4096);
1713
1714	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1715					GFP_KERNEL);
1716	if (!rxdr->desc) {
1717setup_rx_desc_die:
1718		vfree(rxdr->buffer_info);
1719		return -ENOMEM;
1720	}
1721
1722	/* Fix for errata 23, can't cross 64kB boundary */
1723	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1724		void *olddesc = rxdr->desc;
1725		dma_addr_t olddma = rxdr->dma;
1726		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1727		      rxdr->size, rxdr->desc);
1728		/* Try again, without freeing the previous */
1729		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1730						&rxdr->dma, GFP_KERNEL);
1731		/* Failed allocation, critical failure */
1732		if (!rxdr->desc) {
1733			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1734					  olddma);
1735			goto setup_rx_desc_die;
1736		}
1737
1738		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1739			/* give up */
1740			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1741					  rxdr->dma);
1742			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1743					  olddma);
1744			e_err(probe, "Unable to allocate aligned memory for "
1745			      "the Rx descriptor ring\n");
1746			goto setup_rx_desc_die;
1747		} else {
1748			/* Free old allocation, new allocation was successful */
1749			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1750					  olddma);
1751		}
1752	}
1753	memset(rxdr->desc, 0, rxdr->size);
1754
1755	rxdr->next_to_clean = 0;
1756	rxdr->next_to_use = 0;
1757	rxdr->rx_skb_top = NULL;
1758
1759	return 0;
1760}
1761
1762/**
1763 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1764 * 				  (Descriptors) for all queues
1765 * @adapter: board private structure
1766 *
1767 * Return 0 on success, negative on failure
1768 **/
1769int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1770{
1771	int i, err = 0;
1772
1773	for (i = 0; i < adapter->num_rx_queues; i++) {
1774		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1775		if (err) {
1776			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1777			for (i--; i >= 0; i--)
1778				e1000_free_rx_resources(adapter,
1779							&adapter->rx_ring[i]);
1780			break;
1781		}
1782	}
1783
1784	return err;
1785}
1786
1787/**
1788 * e1000_setup_rctl - configure the receive control registers
1789 * @adapter: Board private structure
1790 **/
1791static void e1000_setup_rctl(struct e1000_adapter *adapter)
1792{
1793	struct e1000_hw *hw = &adapter->hw;
1794	u32 rctl;
1795
1796	rctl = er32(RCTL);
1797
1798	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1799
1800	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1801		E1000_RCTL_RDMTS_HALF |
1802		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1803
1804	if (hw->tbi_compatibility_on == 1)
1805		rctl |= E1000_RCTL_SBP;
1806	else
1807		rctl &= ~E1000_RCTL_SBP;
1808
1809	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1810		rctl &= ~E1000_RCTL_LPE;
1811	else
1812		rctl |= E1000_RCTL_LPE;
1813
1814	/* Setup buffer sizes */
1815	rctl &= ~E1000_RCTL_SZ_4096;
1816	rctl |= E1000_RCTL_BSEX;
1817	switch (adapter->rx_buffer_len) {
1818	case E1000_RXBUFFER_2048:
1819	default:
1820		rctl |= E1000_RCTL_SZ_2048;
1821		rctl &= ~E1000_RCTL_BSEX;
1822		break;
1823	case E1000_RXBUFFER_4096:
1824		rctl |= E1000_RCTL_SZ_4096;
1825		break;
1826	case E1000_RXBUFFER_8192:
1827		rctl |= E1000_RCTL_SZ_8192;
1828		break;
1829	case E1000_RXBUFFER_16384:
1830		rctl |= E1000_RCTL_SZ_16384;
1831		break;
1832	}
1833
1834	/* This is useful for sniffing bad packets. */
1835	if (adapter->netdev->features & NETIF_F_RXALL) {
1836		/* UPE and MPE will be handled by normal PROMISC logic
1837		 * in e1000_set_rx_mode
1838		 */
1839		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1840			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1841			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1842
1843		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1844			  E1000_RCTL_DPF | /* Allow filtered pause */
1845			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1846		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1847		 * and that breaks VLANs.
1848		 */
1849	}
1850
1851	ew32(RCTL, rctl);
1852}
1853
1854/**
1855 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1856 * @adapter: board private structure
1857 *
1858 * Configure the Rx unit of the MAC after a reset.
1859 **/
1860static void e1000_configure_rx(struct e1000_adapter *adapter)
1861{
1862	u64 rdba;
1863	struct e1000_hw *hw = &adapter->hw;
1864	u32 rdlen, rctl, rxcsum;
1865
1866	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1867		rdlen = adapter->rx_ring[0].count *
1868			sizeof(struct e1000_rx_desc);
1869		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1870		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1871	} else {
1872		rdlen = adapter->rx_ring[0].count *
1873			sizeof(struct e1000_rx_desc);
1874		adapter->clean_rx = e1000_clean_rx_irq;
1875		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1876	}
1877
1878	/* disable receives while setting up the descriptors */
1879	rctl = er32(RCTL);
1880	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1881
1882	/* set the Receive Delay Timer Register */
1883	ew32(RDTR, adapter->rx_int_delay);
1884
1885	if (hw->mac_type >= e1000_82540) {
1886		ew32(RADV, adapter->rx_abs_int_delay);
1887		if (adapter->itr_setting != 0)
1888			ew32(ITR, 1000000000 / (adapter->itr * 256));
1889	}
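	/* The ITR interval is expressed in 256 ns units, so the write
	 * above converts an interrupts-per-second target into a minimum
	 * inter-interrupt gap: for itr = 8000 the register value is
	 * 1000000000 / (8000 * 256) = 488, i.e. roughly 125 us between
	 * interrupts, which is 1/8000 of a second as intended.
	 */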
1890
1891	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1892	 * the Base and Length of the Rx Descriptor Ring
1893	 */
1894	switch (adapter->num_rx_queues) {
1895	case 1:
1896	default:
1897		rdba = adapter->rx_ring[0].dma;
1898		ew32(RDLEN, rdlen);
1899		ew32(RDBAH, (rdba >> 32));
1900		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1901		ew32(RDT, 0);
1902		ew32(RDH, 0);
1903		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1904					   E1000_RDH : E1000_82542_RDH);
1905		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1906					   E1000_RDT : E1000_82542_RDT);
1907		break;
1908	}
1909
1910	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1911	if (hw->mac_type >= e1000_82543) {
1912		rxcsum = er32(RXCSUM);
1913		if (adapter->rx_csum)
1914			rxcsum |= E1000_RXCSUM_TUOFL;
1915		else
1916			/* don't need to clear IPPCSE as it defaults to 0 */
1917			rxcsum &= ~E1000_RXCSUM_TUOFL;
1918		ew32(RXCSUM, rxcsum);
1919	}
1920
1921	/* Enable Receives */
1922	ew32(RCTL, rctl | E1000_RCTL_EN);
1923}
1924
1925/**
1926 * e1000_free_tx_resources - Free Tx Resources per Queue
1927 * @adapter: board private structure
1928 * @tx_ring: Tx descriptor ring for a specific queue
1929 *
1930 * Free all transmit software resources
1931 **/
1932static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1933				    struct e1000_tx_ring *tx_ring)
1934{
1935	struct pci_dev *pdev = adapter->pdev;
1936
1937	e1000_clean_tx_ring(adapter, tx_ring);
1938
1939	vfree(tx_ring->buffer_info);
1940	tx_ring->buffer_info = NULL;
1941
1942	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1943			  tx_ring->dma);
1944
1945	tx_ring->desc = NULL;
1946}
1947
1948/**
1949 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1950 * @adapter: board private structure
1951 *
1952 * Free all transmit software resources
1953 **/
1954void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1955{
1956	int i;
1957
1958	for (i = 0; i < adapter->num_tx_queues; i++)
1959		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1960}
1961
1962static void
1963e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1964				 struct e1000_tx_buffer *buffer_info)
1965	{
1966	if (buffer_info->dma) {
1967		if (buffer_info->mapped_as_page)
1968			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1969				       buffer_info->length, DMA_TO_DEVICE);
1970		else
1971			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1972					 buffer_info->length,
1973					 DMA_TO_DEVICE);
1974		buffer_info->dma = 0;
1975	}
1976	if (buffer_info->skb) {
1977		dev_kfree_skb_any(buffer_info->skb);
1978		buffer_info->skb = NULL;
1979	}
1980	buffer_info->time_stamp = 0;
1981	/* buffer_info must be completely set up in the transmit path */
1982}
1983
1984/**
1985 * e1000_clean_tx_ring - Free Tx Buffers
1986 * @adapter: board private structure
1987 * @tx_ring: ring to be cleaned
1988 **/
1989static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1990				struct e1000_tx_ring *tx_ring)
1991{
1992	struct e1000_hw *hw = &adapter->hw;
1993	struct e1000_tx_buffer *buffer_info;
1994	unsigned long size;
1995	unsigned int i;
1996
1997	/* Free all the Tx ring sk_buffs */
1998
1999	for (i = 0; i < tx_ring->count; i++) {
2000		buffer_info = &tx_ring->buffer_info[i];
2001		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2002	}
2003
2004	netdev_reset_queue(adapter->netdev);
2005	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2006	memset(tx_ring->buffer_info, 0, size);
2007
2008	/* Zero out the descriptor ring */
2009
2010	memset(tx_ring->desc, 0, tx_ring->size);
2011
2012	tx_ring->next_to_use = 0;
2013	tx_ring->next_to_clean = 0;
2014	tx_ring->last_tx_tso = false;
2015
2016	writel(0, hw->hw_addr + tx_ring->tdh);
2017	writel(0, hw->hw_addr + tx_ring->tdt);
2018}
2019
2020/**
2021 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2022 * @adapter: board private structure
2023 **/
2024static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2025{
2026	int i;
2027
2028	for (i = 0; i < adapter->num_tx_queues; i++)
2029		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2030}
2031
2032/**
2033 * e1000_free_rx_resources - Free Rx Resources
2034 * @adapter: board private structure
2035 * @rx_ring: ring to clean the resources from
2036 *
2037 * Free all receive software resources
2038 **/
2039static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2040				    struct e1000_rx_ring *rx_ring)
2041{
2042	struct pci_dev *pdev = adapter->pdev;
2043
2044	e1000_clean_rx_ring(adapter, rx_ring);
2045
2046	vfree(rx_ring->buffer_info);
2047	rx_ring->buffer_info = NULL;
2048
2049	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2050			  rx_ring->dma);
2051
2052	rx_ring->desc = NULL;
2053}
2054
2055/**
2056 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2057 * @adapter: board private structure
2058 *
2059 * Free all receive software resources
2060 **/
2061void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2062{
2063	int i;
2064
2065	for (i = 0; i < adapter->num_rx_queues; i++)
2066		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2067}
2068
2069#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
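/* NET_SKB_PAD leaves room for protocol headers to be pushed and
 * NET_IP_ALIGN offsets the buffer so the IP header is aligned once the
 * 14-byte Ethernet header is stripped. The fragment length computed
 * below is then (roughly, exact values are arch-dependent)
 * SKB_DATA_ALIGN(rx_buffer_len + headroom) plus an aligned
 * struct skb_shared_info, which lives at the tail of the buffer.
 */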
2070static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2071{
2072	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2073		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2074}
2075
2076static void *e1000_alloc_frag(const struct e1000_adapter *a)
2077{
2078	unsigned int len = e1000_frag_len(a);
2079	u8 *data = netdev_alloc_frag(len);
2080
2081	if (likely(data))
2082		data += E1000_HEADROOM;
2083	return data;
2084}
2085
2086/**
2087 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2088 * @adapter: board private structure
2089 * @rx_ring: ring to free buffers from
2090 **/
2091static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2092				struct e1000_rx_ring *rx_ring)
2093{
2094	struct e1000_hw *hw = &adapter->hw;
2095	struct e1000_rx_buffer *buffer_info;
2096	struct pci_dev *pdev = adapter->pdev;
2097	unsigned long size;
2098	unsigned int i;
2099
2100	/* Free all the Rx netfrags */
2101	for (i = 0; i < rx_ring->count; i++) {
2102		buffer_info = &rx_ring->buffer_info[i];
2103		if (adapter->clean_rx == e1000_clean_rx_irq) {
2104			if (buffer_info->dma)
2105				dma_unmap_single(&pdev->dev, buffer_info->dma,
2106						 adapter->rx_buffer_len,
2107						 DMA_FROM_DEVICE);
2108			if (buffer_info->rxbuf.data) {
2109				skb_free_frag(buffer_info->rxbuf.data);
2110				buffer_info->rxbuf.data = NULL;
2111			}
2112		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2113			if (buffer_info->dma)
2114				dma_unmap_page(&pdev->dev, buffer_info->dma,
2115					       adapter->rx_buffer_len,
2116					       DMA_FROM_DEVICE);
2117			if (buffer_info->rxbuf.page) {
2118				put_page(buffer_info->rxbuf.page);
2119				buffer_info->rxbuf.page = NULL;
2120			}
2121		}
2122
2123		buffer_info->dma = 0;
2124	}
2125
2126	/* there also may be some cached data from a chained receive */
2127	napi_free_frags(&adapter->napi);
2128	rx_ring->rx_skb_top = NULL;
2129
2130	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2131	memset(rx_ring->buffer_info, 0, size);
2132
2133	/* Zero out the descriptor ring */
2134	memset(rx_ring->desc, 0, rx_ring->size);
2135
2136	rx_ring->next_to_clean = 0;
2137	rx_ring->next_to_use = 0;
2138
2139	writel(0, hw->hw_addr + rx_ring->rdh);
2140	writel(0, hw->hw_addr + rx_ring->rdt);
2141}
2142
2143/**
2144 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2145 * @adapter: board private structure
2146 **/
2147static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2148{
2149	int i;
2150
2151	for (i = 0; i < adapter->num_rx_queues; i++)
2152		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2153}
2154
2155/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2156 * and memory write and invalidate disabled for certain operations
2157 */
2158static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2159{
2160	struct e1000_hw *hw = &adapter->hw;
2161	struct net_device *netdev = adapter->netdev;
2162	u32 rctl;
2163
2164	e1000_pci_clear_mwi(hw);
2165
2166	rctl = er32(RCTL);
2167	rctl |= E1000_RCTL_RST;
2168	ew32(RCTL, rctl);
2169	E1000_WRITE_FLUSH();
2170	mdelay(5);
2171
2172	if (netif_running(netdev))
2173		e1000_clean_all_rx_rings(adapter);
2174}
2175
2176static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2177{
2178	struct e1000_hw *hw = &adapter->hw;
2179	struct net_device *netdev = adapter->netdev;
2180	u32 rctl;
2181
2182	rctl = er32(RCTL);
2183	rctl &= ~E1000_RCTL_RST;
2184	ew32(RCTL, rctl);
2185	E1000_WRITE_FLUSH();
2186	mdelay(5);
2187
2188	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2189		e1000_pci_set_mwi(hw);
2190
2191	if (netif_running(netdev)) {
2192		/* No need to loop, because 82542 supports only 1 queue */
2193		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2194		e1000_configure_rx(adapter);
2195		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2196	}
2197}
2198
2199/**
2200 * e1000_set_mac - Change the Ethernet Address of the NIC
2201 * @netdev: network interface device structure
2202 * @p: pointer to an address structure
2203 *
2204 * Returns 0 on success, negative on failure
2205 **/
2206static int e1000_set_mac(struct net_device *netdev, void *p)
2207{
2208	struct e1000_adapter *adapter = netdev_priv(netdev);
2209	struct e1000_hw *hw = &adapter->hw;
2210	struct sockaddr *addr = p;
2211
2212	if (!is_valid_ether_addr(addr->sa_data))
2213		return -EADDRNOTAVAIL;
2214
2215	/* 82542 2.0 needs to be in reset to write receive address registers */
2216
2217	if (hw->mac_type == e1000_82542_rev2_0)
2218		e1000_enter_82542_rst(adapter);
2219
2220	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2221	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2222
2223	e1000_rar_set(hw, hw->mac_addr, 0);
2224
2225	if (hw->mac_type == e1000_82542_rev2_0)
2226		e1000_leave_82542_rst(adapter);
2227
2228	return 0;
2229}
2230
2231/**
2232 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2233 * @netdev: network interface device structure
2234 *
2235 * The set_rx_mode entry point is called whenever the unicast or multicast
2236 * address lists or the network interface flags are updated. This routine is
2237 * responsible for configuring the hardware for proper unicast, multicast,
2238 * promiscuous mode, and all-multi behavior.
2239 **/
2240static void e1000_set_rx_mode(struct net_device *netdev)
2241{
2242	struct e1000_adapter *adapter = netdev_priv(netdev);
2243	struct e1000_hw *hw = &adapter->hw;
2244	struct netdev_hw_addr *ha;
2245	bool use_uc = false;
2246	u32 rctl;
2247	u32 hash_value;
2248	int i, rar_entries = E1000_RAR_ENTRIES;
2249	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2250	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2251
2252	if (!mcarray)
2253		return;
2254
2255	/* Check for Promiscuous and All Multicast modes */
2256
2257	rctl = er32(RCTL);
2258
2259	if (netdev->flags & IFF_PROMISC) {
2260		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2261		rctl &= ~E1000_RCTL_VFE;
2262	} else {
2263		if (netdev->flags & IFF_ALLMULTI)
2264			rctl |= E1000_RCTL_MPE;
2265		else
2266			rctl &= ~E1000_RCTL_MPE;
2267		/* Enable VLAN filter if there is a VLAN */
2268		if (e1000_vlan_used(adapter))
2269			rctl |= E1000_RCTL_VFE;
2270	}
2271
2272	if (netdev_uc_count(netdev) > rar_entries - 1) {
2273		rctl |= E1000_RCTL_UPE;
2274	} else if (!(netdev->flags & IFF_PROMISC)) {
2275		rctl &= ~E1000_RCTL_UPE;
2276		use_uc = true;
2277	}
2278
2279	ew32(RCTL, rctl);
2280
2281	/* 82542 2.0 needs to be in reset to write receive address registers */
2282
2283	if (hw->mac_type == e1000_82542_rev2_0)
2284		e1000_enter_82542_rst(adapter);
2285
2286	/* load the first 14 addresses into the exact filters 1-14. Unicast
2287	 * addresses take precedence to avoid disabling unicast filtering
2288	 * when possible.
2289	 *
2290	 * RAR 0 is used for the station MAC address; if there are
2291	 * fewer than 14 addresses, go ahead and clear the remaining filters
2292	 */
2293	i = 1;
2294	if (use_uc)
2295		netdev_for_each_uc_addr(ha, netdev) {
2296			if (i == rar_entries)
2297				break;
2298			e1000_rar_set(hw, ha->addr, i++);
2299		}
2300
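	/* Addresses that overflow the exact filters fall back to the
	 * 4096-bit multicast table array: hash_value >> 5 selects one of
	 * 128 32-bit MTA registers (hence the 0x7F mask) and
	 * hash_value & 0x1F selects the bit within that register.
	 */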
2301	netdev_for_each_mc_addr(ha, netdev) {
2302		if (i == rar_entries) {
2303			/* load any remaining addresses into the hash table */
2304			u32 hash_reg, hash_bit, mta;
2305			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2306			hash_reg = (hash_value >> 5) & 0x7F;
2307			hash_bit = hash_value & 0x1F;
2308			mta = (1 << hash_bit);
2309			mcarray[hash_reg] |= mta;
2310		} else {
2311			e1000_rar_set(hw, ha->addr, i++);
2312		}
2313	}
2314
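	/* Each receive address filter is a register pair, so entry i is
	 * addressed as i << 1 for the low dword and (i << 1) + 1 for the
	 * high dword; zeroing both disables the filter entry.
	 */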
2315	for (; i < rar_entries; i++) {
2316		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2317		E1000_WRITE_FLUSH();
2318		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2319		E1000_WRITE_FLUSH();
2320	}
2321
2322	/* write the hash table completely, write from bottom to avoid
2323	 * both stupid write combining chipsets, and flushing each write
2324	 */
2325	for (i = mta_reg_count - 1; i >= 0; i--) {
2326		/* The 82544 has an erratum where writing an odd MTA
2327		 * offset overwrites the previous even offset; writing
2328		 * backwards over the range avoids the issue by always
2329		 * writing the odd offset first
2330		 */
2331		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2332	}
2333	E1000_WRITE_FLUSH();
2334
2335	if (hw->mac_type == e1000_82542_rev2_0)
2336		e1000_leave_82542_rst(adapter);
2337
2338	kfree(mcarray);
2339}
2340
2341/**
2342 * e1000_update_phy_info_task - get phy info
2343 * @work: work struct contained inside adapter struct
2344 *
2345 * Need to wait a few seconds after link up to get diagnostic information from
2346 * the phy
2347 */
2348static void e1000_update_phy_info_task(struct work_struct *work)
2349{
2350	struct e1000_adapter *adapter = container_of(work,
2351						     struct e1000_adapter,
2352						     phy_info_task.work);
2353
2354	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2355}
2356
2357/**
2358 * e1000_82547_tx_fifo_stall_task - task to complete work
2359 * @work: work struct contained inside adapter struct
2360 **/
2361static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2362{
2363	struct e1000_adapter *adapter = container_of(work,
2364						     struct e1000_adapter,
2365						     fifo_stall_task.work);
2366	struct e1000_hw *hw = &adapter->hw;
2367	struct net_device *netdev = adapter->netdev;
2368	u32 tctl;
2369
2370	if (atomic_read(&adapter->tx_fifo_stall)) {
2371		if ((er32(TDT) == er32(TDH)) &&
2372		   (er32(TDFT) == er32(TDFH)) &&
2373		   (er32(TDFTS) == er32(TDFHS))) {
2374			tctl = er32(TCTL);
2375			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2376			ew32(TDFT, adapter->tx_head_addr);
2377			ew32(TDFH, adapter->tx_head_addr);
2378			ew32(TDFTS, adapter->tx_head_addr);
2379			ew32(TDFHS, adapter->tx_head_addr);
2380			ew32(TCTL, tctl);
2381			E1000_WRITE_FLUSH();
2382
2383			adapter->tx_fifo_head = 0;
2384			atomic_set(&adapter->tx_fifo_stall, 0);
2385			netif_wake_queue(netdev);
2386		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2387			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2388		}
2389	}
2390}
2391
2392bool e1000_has_link(struct e1000_adapter *adapter)
2393{
2394	struct e1000_hw *hw = &adapter->hw;
2395	bool link_active = false;
2396
2397	/* get_link_status is set on LSC (link status) interrupt or rx
2398	 * sequence error interrupt (except on intel ce4100).
2399	 * get_link_status will stay false until
2400	 * e1000_check_for_link establishes link; this applies to
2401	 * copper adapters ONLY
2402	 */
2403	switch (hw->media_type) {
2404	case e1000_media_type_copper:
2405		if (hw->mac_type == e1000_ce4100)
2406			hw->get_link_status = 1;
2407		if (hw->get_link_status) {
2408			e1000_check_for_link(hw);
2409			link_active = !hw->get_link_status;
2410		} else {
2411			link_active = true;
2412		}
2413		break;
2414	case e1000_media_type_fiber:
2415		e1000_check_for_link(hw);
2416		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2417		break;
2418	case e1000_media_type_internal_serdes:
2419		e1000_check_for_link(hw);
2420		link_active = hw->serdes_has_link;
2421		break;
2422	default:
2423		break;
2424	}
2425
2426	return link_active;
2427}
2428
2429/**
2430 * e1000_watchdog - work function
2431 * @work: work struct contained inside adapter struct
2432 **/
2433static void e1000_watchdog(struct work_struct *work)
2434{
2435	struct e1000_adapter *adapter = container_of(work,
2436						     struct e1000_adapter,
2437						     watchdog_task.work);
2438	struct e1000_hw *hw = &adapter->hw;
2439	struct net_device *netdev = adapter->netdev;
2440	struct e1000_tx_ring *txdr = adapter->tx_ring;
2441	u32 link, tctl;
2442
2443	link = e1000_has_link(adapter);
2444	if ((netif_carrier_ok(netdev)) && link)
2445		goto link_up;
2446
2447	if (link) {
2448		if (!netif_carrier_ok(netdev)) {
2449			u32 ctrl;
2450			bool txb2b = true;
2451			/* update snapshot of PHY registers on LSC */
2452			e1000_get_speed_and_duplex(hw,
2453						   &adapter->link_speed,
2454						   &adapter->link_duplex);
2455
2456			ctrl = er32(CTRL);
2457			pr_info("%s NIC Link is Up %d Mbps %s, "
2458				"Flow Control: %s\n",
2459				netdev->name,
2460				adapter->link_speed,
2461				adapter->link_duplex == FULL_DUPLEX ?
2462				"Full Duplex" : "Half Duplex",
2463				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2464				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2465				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2466				E1000_CTRL_TFCE) ? "TX" : "None")));
2467
2468			/* adjust timeout factor according to speed/duplex */
2469			adapter->tx_timeout_factor = 1;
2470			switch (adapter->link_speed) {
2471			case SPEED_10:
2472				txb2b = false;
2473				adapter->tx_timeout_factor = 16;
2474				break;
2475			case SPEED_100:
2476				txb2b = false;
2477				/* maybe add some timeout factor ? */
2478				break;
2479			}
2480
2481			/* enable transmits in the hardware */
2482			tctl = er32(TCTL);
2483			tctl |= E1000_TCTL_EN;
2484			ew32(TCTL, tctl);
2485
2486			netif_carrier_on(netdev);
2487			if (!test_bit(__E1000_DOWN, &adapter->flags))
2488				schedule_delayed_work(&adapter->phy_info_task,
2489						      2 * HZ);
2490			adapter->smartspeed = 0;
2491		}
2492	} else {
2493		if (netif_carrier_ok(netdev)) {
2494			adapter->link_speed = 0;
2495			adapter->link_duplex = 0;
2496			pr_info("%s NIC Link is Down\n",
2497				netdev->name);
2498			netif_carrier_off(netdev);
2499
2500			if (!test_bit(__E1000_DOWN, &adapter->flags))
2501				schedule_delayed_work(&adapter->phy_info_task,
2502						      2 * HZ);
2503		}
2504
2505		e1000_smartspeed(adapter);
2506	}
2507
2508link_up:
2509	e1000_update_stats(adapter);
2510
2511	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2512	adapter->tpt_old = adapter->stats.tpt;
2513	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2514	adapter->colc_old = adapter->stats.colc;
2515
2516	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2517	adapter->gorcl_old = adapter->stats.gorcl;
2518	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2519	adapter->gotcl_old = adapter->stats.gotcl;
2520
2521	e1000_update_adaptive(hw);
2522
2523	if (!netif_carrier_ok(netdev)) {
2524		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2525			/* We've lost link, so the controller stops DMA,
2526			 * but we've got queued Tx work that's never going
2527			 * to get done, so reset controller to flush Tx.
2528			 * (Do the reset outside of interrupt context).
2529			 */
2530			adapter->tx_timeout_count++;
2531			schedule_work(&adapter->reset_task);
2532			/* exit immediately since reset is imminent */
2533			return;
2534		}
2535	}
2536
2537	/* Simple mode for Interrupt Throttle Rate (ITR) */
2538	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2539		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2540		 * Total asymmetrical Tx or Rx gets ITR=8000;
2541		 * everyone else is between 2000-8000.
2542		 */
2543		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2544		u32 dif = (adapter->gotcl > adapter->gorcl ?
2545			    adapter->gotcl - adapter->gorcl :
2546			    adapter->gorcl - adapter->gotcl) / 10000;
2547		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2548
2549		ew32(ITR, 1000000000 / (itr * 256));
2550	}
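	/* Sanity-checking the formula above: fully asymmetric traffic
	 * has dif == goc, giving itr = 6000 + 2000 = 8000; perfectly
	 * symmetric traffic has dif == 0, giving itr = 2000; mixed
	 * loads land in between, matching the comment above.
	 */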
2551
2552	/* Cause software interrupt to ensure rx ring is cleaned */
2553	ew32(ICS, E1000_ICS_RXDMT0);
2554
2555	/* Force detection of hung controller every watchdog period */
2556	adapter->detect_tx_hung = true;
2557
2558	/* Reschedule the task */
2559	if (!test_bit(__E1000_DOWN, &adapter->flags))
2560		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2561}
2562
2563enum latency_range {
2564	lowest_latency = 0,
2565	low_latency = 1,
2566	bulk_latency = 2,
2567	latency_invalid = 255
2568};
2569
2570/**
2571 * e1000_update_itr - update the dynamic ITR value based on statistics
2572 * @adapter: pointer to adapter
2573 * @itr_setting: current adapter->itr
2574 * @packets: the number of packets during this measurement interval
2575 * @bytes: the number of bytes during this measurement interval
2576 *
2577 *      Stores a new ITR value based on packets and byte
2578 *      counts during the last interrupt.  The advantage of per interrupt
2579 *      computation is faster updates and more accurate ITR for the current
2580 *      traffic pattern.  Constants in this function were computed
2581 *      based on theoretical maximum wire speed and thresholds were set based
2582 *      on testing data as well as attempting to minimize response time
2583 *      while increasing bulk throughput.
2584 *      This functionality is controlled by the InterruptThrottleRate module
2585 *      parameter (see e1000_param.c).
2586 **/
2587static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2588				     u16 itr_setting, int packets, int bytes)
2589{
2590	unsigned int retval = itr_setting;
2591	struct e1000_hw *hw = &adapter->hw;
2592
2593	if (unlikely(hw->mac_type < e1000_82540))
2594		goto update_itr_done;
2595
2596	if (packets == 0)
2597		goto update_itr_done;
2598
2599	switch (itr_setting) {
2600	case lowest_latency:
2601		/* jumbo frames get bulk treatment */
2602		if (bytes/packets > 8000)
2603			retval = bulk_latency;
2604		else if ((packets < 5) && (bytes > 512))
2605			retval = low_latency;
2606		break;
2607	case low_latency:  /* 50 usec aka 20000 ints/s */
2608		if (bytes > 10000) {
2609			/* jumbo frames need bulk latency setting */
2610			if (bytes/packets > 8000)
2611				retval = bulk_latency;
2612			else if ((packets < 10) || ((bytes/packets) > 1200))
2613				retval = bulk_latency;
2614			else if ((packets > 35))
2615				retval = lowest_latency;
2616		} else if (bytes/packets > 2000)
2617			retval = bulk_latency;
2618		else if (packets <= 2 && bytes < 512)
2619			retval = lowest_latency;
2620		break;
2621	case bulk_latency: /* 250 usec aka 4000 ints/s */
2622		if (bytes > 25000) {
2623			if (packets > 35)
2624				retval = low_latency;
2625		} else if (bytes < 6000) {
2626			retval = low_latency;
2627		}
2628		break;
2629	}
2630
2631update_itr_done:
2632	return retval;
2633}
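/* For example, from lowest_latency an interval that saw 2 packets
 * totalling 18000 bytes has bytes/packets = 9000 > 8000, so the
 * jumbo-frame check demotes it straight to bulk_latency, while
 * 3 packets totalling 900 bytes (packets < 5, bytes > 512) would
 * move it to low_latency instead.
 */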
2634
2635static void e1000_set_itr(struct e1000_adapter *adapter)
2636{
2637	struct e1000_hw *hw = &adapter->hw;
2638	u16 current_itr;
2639	u32 new_itr = adapter->itr;
2640
2641	if (unlikely(hw->mac_type < e1000_82540))
2642		return;
2643
2644	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2645	if (unlikely(adapter->link_speed != SPEED_1000)) {
2646		current_itr = 0;
2647		new_itr = 4000;
2648		goto set_itr_now;
2649	}
2650
2651	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2652					   adapter->total_tx_packets,
2653					   adapter->total_tx_bytes);
2654	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2655	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2656		adapter->tx_itr = low_latency;
2657
2658	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2659					   adapter->total_rx_packets,
2660					   adapter->total_rx_bytes);
2661	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2662	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2663		adapter->rx_itr = low_latency;
2664
2665	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2666
2667	switch (current_itr) {
2668	/* counts and packets in update_itr are dependent on these numbers */
2669	case lowest_latency:
2670		new_itr = 70000;
2671		break;
2672	case low_latency:
2673		new_itr = 20000; /* aka hwitr = ~200 */
2674		break;
2675	case bulk_latency:
2676		new_itr = 4000;
2677		break;
2678	default:
2679		break;
2680	}
2681
2682set_itr_now:
2683	if (new_itr != adapter->itr) {
2684		/* this attempts to bias the interrupt rate towards Bulk
2685		 * by adding intermediate steps when interrupt rate is
2686		 * increasing
2687		 */
2688		new_itr = new_itr > adapter->itr ?
2689			  min(adapter->itr + (new_itr >> 2), new_itr) :
2690			  new_itr;
2691		adapter->itr = new_itr;
2692		ew32(ITR, 1000000000 / (new_itr * 256));
2693	}
2694}
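/* The step above damps upward jumps in interrupt rate: moving from
 * itr = 4000 toward a target of 20000 first writes
 * min(4000 + (20000 >> 2), 20000) = 9000, so several quiet intervals
 * are needed before the full 20000 ints/s rate is restored.
 */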
2695
2696#define E1000_TX_FLAGS_CSUM		0x00000001
2697#define E1000_TX_FLAGS_VLAN		0x00000002
2698#define E1000_TX_FLAGS_TSO		0x00000004
2699#define E1000_TX_FLAGS_IPV4		0x00000008
2700#define E1000_TX_FLAGS_NO_FCS		0x00000010
2701#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2702#define E1000_TX_FLAGS_VLAN_SHIFT	16
2703
2704static int e1000_tso(struct e1000_adapter *adapter,
2705		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2706		     __be16 protocol)
2707{
2708	struct e1000_context_desc *context_desc;
2709	struct e1000_tx_buffer *buffer_info;
2710	unsigned int i;
2711	u32 cmd_length = 0;
2712	u16 ipcse = 0, tucse, mss;
2713	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2714
2715	if (skb_is_gso(skb)) {
2716		int err;
2717
2718		err = skb_cow_head(skb, 0);
2719		if (err < 0)
2720			return err;
2721
2722		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2723		mss = skb_shinfo(skb)->gso_size;
2724		if (protocol == htons(ETH_P_IP)) {
2725			struct iphdr *iph = ip_hdr(skb);
2726			iph->tot_len = 0;
2727			iph->check = 0;
2728			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2729								 iph->daddr, 0,
2730								 IPPROTO_TCP,
2731								 0);
2732			cmd_length = E1000_TXD_CMD_IP;
2733			ipcse = skb_transport_offset(skb) - 1;
2734		} else if (skb_is_gso_v6(skb)) {
2735			ipv6_hdr(skb)->payload_len = 0;
2736			tcp_hdr(skb)->check =
2737				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2738						 &ipv6_hdr(skb)->daddr,
2739						 0, IPPROTO_TCP, 0);
2740			ipcse = 0;
2741		}
2742		ipcss = skb_network_offset(skb);
2743		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2744		tucss = skb_transport_offset(skb);
2745		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2746		tucse = 0;
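		/* In the context descriptor written below, ipcse is the
		 * offset of the last byte of the IP header to checksum,
		 * while tucse = 0 tells the hardware to checksum through
		 * the end of the packet; the *css/*cso values are the
		 * checksum start and insertion offsets within the frame.
		 */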
2747
2748		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2749			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2750
2751		i = tx_ring->next_to_use;
2752		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2753		buffer_info = &tx_ring->buffer_info[i];
2754
2755		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2756		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2757		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2758		context_desc->upper_setup.tcp_fields.tucss = tucss;
2759		context_desc->upper_setup.tcp_fields.tucso = tucso;
2760		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2761		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2762		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2763		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2764
2765		buffer_info->time_stamp = jiffies;
2766		buffer_info->next_to_watch = i;
2767
2768		if (++i == tx_ring->count)
2769			i = 0;
2770
2771		tx_ring->next_to_use = i;
2772
2773		return true;
2774	}
2775	return false;
2776}
2777
2778static bool e1000_tx_csum(struct e1000_adapter *adapter,
2779			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2780			  __be16 protocol)
2781{
2782	struct e1000_context_desc *context_desc;
2783	struct e1000_tx_buffer *buffer_info;
2784	unsigned int i;
2785	u8 css;
2786	u32 cmd_len = E1000_TXD_CMD_DEXT;
2787
2788	if (skb->ip_summed != CHECKSUM_PARTIAL)
2789		return false;
2790
2791	switch (protocol) {
2792	case cpu_to_be16(ETH_P_IP):
2793		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2794			cmd_len |= E1000_TXD_CMD_TCP;
2795		break;
2796	case cpu_to_be16(ETH_P_IPV6):
2797		/* XXX not handling all IPV6 headers */
2798		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2799			cmd_len |= E1000_TXD_CMD_TCP;
2800		break;
2801	default:
2802		if (unlikely(net_ratelimit()))
2803			e_warn(drv, "checksum_partial proto=%x!\n",
2804			       skb->protocol);
2805		break;
2806	}
2807
2808	css = skb_checksum_start_offset(skb);
2809
2810	i = tx_ring->next_to_use;
2811	buffer_info = &tx_ring->buffer_info[i];
2812	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2813
2814	context_desc->lower_setup.ip_config = 0;
2815	context_desc->upper_setup.tcp_fields.tucss = css;
2816	context_desc->upper_setup.tcp_fields.tucso =
2817		css + skb->csum_offset;
2818	context_desc->upper_setup.tcp_fields.tucse = 0;
2819	context_desc->tcp_seg_setup.data = 0;
2820	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2821
2822	buffer_info->time_stamp = jiffies;
2823	buffer_info->next_to_watch = i;
2824
2825	if (unlikely(++i == tx_ring->count))
2826		i = 0;
2827
2828	tx_ring->next_to_use = i;
2829
2830	return true;
2831}
2832
2833#define E1000_MAX_TXD_PWR	12
2834#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2835
2836static int e1000_tx_map(struct e1000_adapter *adapter,
2837			struct e1000_tx_ring *tx_ring,
2838			struct sk_buff *skb, unsigned int first,
2839			unsigned int max_per_txd, unsigned int nr_frags,
2840			unsigned int mss)
2841{
2842	struct e1000_hw *hw = &adapter->hw;
2843	struct pci_dev *pdev = adapter->pdev;
2844	struct e1000_tx_buffer *buffer_info;
2845	unsigned int len = skb_headlen(skb);
2846	unsigned int offset = 0, size, count = 0, i;
2847	unsigned int f, bytecount, segs;
2848
2849	i = tx_ring->next_to_use;
2850
2851	while (len) {
2852		buffer_info = &tx_ring->buffer_info[i];
2853		size = min(len, max_per_txd);
2854		/* Workaround for a controller erratum: the descriptor for a
2855		 * non-TSO packet in a linear skb that follows a TSO packet
2856		 * gets written back prematurely, before the data is fully
2857		 * DMA'd to the controller
2858		 */
2859		if (!skb->data_len && tx_ring->last_tx_tso &&
2860		    !skb_is_gso(skb)) {
2861			tx_ring->last_tx_tso = false;
2862			size -= 4;
2863		}
2864
2865		/* Workaround for premature desc write-backs
2866		 * in TSO mode.  Append 4-byte sentinel desc
2867		 */
2868		if (unlikely(mss && !nr_frags && size == len && size > 8))
2869			size -= 4;
2870		/* Work-around for errata 10, which applies to all
2871		 * controllers in PCI-X mode.
2872		 * The fix is to make sure that the first descriptor of a
2873		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2874		 */
2875		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2876			     (size > 2015) && count == 0))
2877			size = 2015;
2878
2879		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2880		 * terminating buffers within evenly-aligned dwords.
2881		 */
2882		if (unlikely(adapter->pcix_82544 &&
2883		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2884		   size > 4))
2885			size -= 4;
2886
2887		buffer_info->length = size;
2888		/* set time_stamp *before* dma to help avoid a possible race */
2889		buffer_info->time_stamp = jiffies;
2890		buffer_info->mapped_as_page = false;
2891		buffer_info->dma = dma_map_single(&pdev->dev,
2892						  skb->data + offset,
2893						  size, DMA_TO_DEVICE);
2894		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2895			goto dma_error;
2896		buffer_info->next_to_watch = i;
2897
2898		len -= size;
2899		offset += size;
2900		count++;
2901		if (len) {
2902			i++;
2903			if (unlikely(i == tx_ring->count))
2904				i = 0;
2905		}
2906	}
2907
2908	for (f = 0; f < nr_frags; f++) {
2909		const struct skb_frag_struct *frag;
2910
2911		frag = &skb_shinfo(skb)->frags[f];
2912		len = skb_frag_size(frag);
2913		offset = 0;
2914
2915		while (len) {
2916			unsigned long bufend;
2917			i++;
2918			if (unlikely(i == tx_ring->count))
2919				i = 0;
2920
2921			buffer_info = &tx_ring->buffer_info[i];
2922			size = min(len, max_per_txd);
2923			/* Workaround for premature desc write-backs
2924			 * in TSO mode.  Append 4-byte sentinel desc
2925			 */
2926			if (unlikely(mss && f == (nr_frags-1) &&
2927			    size == len && size > 8))
2928				size -= 4;
2929			/* Workaround for potential 82544 hang in PCI-X.
2930			 * Avoid terminating buffers within evenly-aligned
2931			 * dwords.
2932			 */
2933			bufend = (unsigned long)
2934				page_to_phys(skb_frag_page(frag));
2935			bufend += offset + size - 1;
2936			if (unlikely(adapter->pcix_82544 &&
2937				     !(bufend & 4) &&
2938				     size > 4))
2939				size -= 4;
2940
2941			buffer_info->length = size;
2942			buffer_info->time_stamp = jiffies;
2943			buffer_info->mapped_as_page = true;
2944			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2945						offset, size, DMA_TO_DEVICE);
2946			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2947				goto dma_error;
2948			buffer_info->next_to_watch = i;
2949
2950			len -= size;
2951			offset += size;
2952			count++;
2953		}
2954	}
2955
2956	segs = skb_shinfo(skb)->gso_segs ?: 1;
2957	/* multiply data chunks by size of headers */
2958	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2959
2960	tx_ring->buffer_info[i].skb = skb;
2961	tx_ring->buffer_info[i].segs = segs;
2962	tx_ring->buffer_info[i].bytecount = bytecount;
2963	tx_ring->buffer_info[first].next_to_watch = i;
2964
2965	return count;
2966
2967dma_error:
2968	dev_err(&pdev->dev, "TX DMA map failed\n");
2969	buffer_info->dma = 0;
2970	if (count)
2971		count--;
2972
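	/* Unwind: walk backwards from the failing slot, wrapping at the
	 * ring boundary, and unmap/free every buffer this call mapped so
	 * the ring is left exactly as it was found.
	 */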
2973	while (count--) {
2974		if (i == 0)
2975			i += tx_ring->count;
2976		i--;
2977		buffer_info = &tx_ring->buffer_info[i];
2978		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2979	}
2980
2981	return 0;
2982}
2983
2984static void e1000_tx_queue(struct e1000_adapter *adapter,
2985			   struct e1000_tx_ring *tx_ring, int tx_flags,
2986			   int count)
2987{
2988	struct e1000_tx_desc *tx_desc = NULL;
2989	struct e1000_tx_buffer *buffer_info;
2990	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2991	unsigned int i;
2992
2993	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2994		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2995			     E1000_TXD_CMD_TSE;
2996		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2997
2998		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2999			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3000	}
3001
3002	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3003		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3004		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3005	}
3006
3007	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3008		txd_lower |= E1000_TXD_CMD_VLE;
3009		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3010	}
3011
3012	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3013		txd_lower &= ~(E1000_TXD_CMD_IFCS);
3014
3015	i = tx_ring->next_to_use;
3016
3017	while (count--) {
3018		buffer_info = &tx_ring->buffer_info[i];
3019		tx_desc = E1000_TX_DESC(*tx_ring, i);
3020		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3021		tx_desc->lower.data =
3022			cpu_to_le32(txd_lower | buffer_info->length);
3023		tx_desc->upper.data = cpu_to_le32(txd_upper);
3024		if (unlikely(++i == tx_ring->count))
3025			i = 0;
3026	}
3027
3028	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3029
3030	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3031	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3032		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3033
3034	/* Force memory writes to complete before letting h/w
3035	 * know there are new descriptors to fetch.  (Only
3036	 * applicable for weak-ordered memory model archs,
3037	 * such as IA-64).
3038	 */
3039	wmb();
3040
3041	tx_ring->next_to_use = i;
3042}
3043
3044/* 82547 workaround to avoid controller hang in half-duplex environment.
3045 * The workaround is to avoid queuing a large packet that would span
3046 * the internal Tx FIFO ring boundary by notifying the stack to resend
3047 * the packet at a later time.  This gives the Tx FIFO an opportunity to
3048 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3049 * to the beginning of the Tx FIFO.
3050 */
3051
3052#define E1000_FIFO_HDR			0x10
3053#define E1000_82547_PAD_LEN		0x3E0
3054
3055static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3056				       struct sk_buff *skb)
3057{
3058	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3059	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3060
3061	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3062
3063	if (adapter->link_duplex != HALF_DUPLEX)
3064		goto no_fifo_stall_required;
3065
3066	if (atomic_read(&adapter->tx_fifo_stall))
3067		return 1;
3068
3069	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3070		atomic_set(&adapter->tx_fifo_stall, 1);
3071		return 1;
3072	}
3073
3074no_fifo_stall_required:
3075	adapter->tx_fifo_head += skb_fifo_len;
3076	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3077		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3078	return 0;
3079}
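/* Illustrating the accounting above: a minimum 60-byte frame occupies
 * ALIGN(60 + E1000_FIFO_HDR, E1000_FIFO_HDR) = ALIGN(76, 16) = 80
 * bytes of on-chip FIFO, and a stall is only flagged in half duplex,
 * when skb_fifo_len >= E1000_82547_PAD_LEN + fifo_space, i.e. when
 * the frame could wrap the FIFO boundary.
 */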
3080
3081static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3082{
3083	struct e1000_adapter *adapter = netdev_priv(netdev);
3084	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3085
3086	netif_stop_queue(netdev);
3087	/* Herbert's original patch had:
3088	 *  smp_mb__after_netif_stop_queue();
3089	 * but since that doesn't exist yet, just open code it.
3090	 */
3091	smp_mb();
3092
3093	/* We need to check again in a case another CPU has just
3094	 * made room available.
3095	 */
3096	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3097		return -EBUSY;
3098
3099	/* A reprieve! */
3100	netif_start_queue(netdev);
3101	++adapter->restart_queue;
3102	return 0;
3103}
3104
3105static int e1000_maybe_stop_tx(struct net_device *netdev,
3106			       struct e1000_tx_ring *tx_ring, int size)
3107{
3108	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3109		return 0;
3110	return __e1000_maybe_stop_tx(netdev, size);
3111}
3112
3113#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
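/* TXD_USE_COUNT(S, X) computes ceil(S / 2^X): with E1000_MAX_TXD_PWR
 * of 12 (4096 bytes per data descriptor), a 9000-byte buffer needs
 * TXD_USE_COUNT(9000, 12) = 3 descriptors.
 */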
3114static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3115				    struct net_device *netdev)
3116{
3117	struct e1000_adapter *adapter = netdev_priv(netdev);
3118	struct e1000_hw *hw = &adapter->hw;
3119	struct e1000_tx_ring *tx_ring;
3120	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3121	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3122	unsigned int tx_flags = 0;
3123	unsigned int len = skb_headlen(skb);
3124	unsigned int nr_frags;
3125	unsigned int mss;
3126	int count = 0;
3127	int tso;
3128	unsigned int f;
3129	__be16 protocol = vlan_get_protocol(skb);
3130
3131	/* This goes back to the question of how to logically map a Tx queue
3132	 * to a flow.  Right now, performance is impacted slightly negatively
3133	 * if using multiple Tx queues.  If the stack breaks away from a
3134	 * single qdisc implementation, we can look at this again.
3135	 */
3136	tx_ring = adapter->tx_ring;
3137
3138	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3139	 * packets may get corrupted during padding by HW.
3140	 * To work around this issue, pad all small packets manually.
3141	 */
3142	if (eth_skb_pad(skb))
3143		return NETDEV_TX_OK;
3144
3145	mss = skb_shinfo(skb)->gso_size;
3146	/* The controller does a simple calculation to
3147	 * make sure there is enough room in the FIFO before
3148	 * initiating the DMA for each buffer.  The calc
3149	 * requires ceil(buffer len / mss) <= 4, so to make
3150	 * sure we don't overrun the FIFO, cap the max buffer
3151	 * len at 4 * mss if mss drops.
3152	 */
3153	if (mss) {
3154		u8 hdr_len;
3155		max_per_txd = min(mss << 2, max_per_txd);
3156		max_txd_pwr = fls(max_per_txd) - 1;
3157
3158		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3159		if (skb->data_len && hdr_len == len) {
3160			switch (hw->mac_type) {
3161				unsigned int pull_size;
3162			case e1000_82544:
3163				/* Make sure we have room to chop off 4 bytes,
3164				 * and that the end alignment will work out to
3165				 * this hardware's requirements
3166				 * NOTE: this is a TSO only workaround
3167				 * if end byte alignment not correct move us
3168				 * into the next dword
3169				 */
3170				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3171				    & 4)
3172					break;
3174				pull_size = min((unsigned int)4, skb->data_len);
3175				if (!__pskb_pull_tail(skb, pull_size)) {
3176					e_err(drv, "__pskb_pull_tail "
3177					      "failed.\n");
3178					dev_kfree_skb_any(skb);
3179					return NETDEV_TX_OK;
3180				}
3181				len = skb_headlen(skb);
3182				break;
3183			default:
3184				/* do nothing */
3185				break;
3186			}
3187		}
3188	}
3189
3190	/* reserve a descriptor for the offload context */
3191	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3192		count++;
3193	count++;
3194
3195	/* Controller Erratum workaround */
3196	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3197		count++;
3198
3199	count += TXD_USE_COUNT(len, max_txd_pwr);
3200
3201	if (adapter->pcix_82544)
3202		count++;
3203
3204	/* Work-around for errata 10, which applies to all controllers
3205	 * in PCI-X mode; add one more descriptor to the count
3206	 */
3207	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3208			(len > 2015)))
3209		count++;
3210
3211	nr_frags = skb_shinfo(skb)->nr_frags;
3212	for (f = 0; f < nr_frags; f++)
3213		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3214				       max_txd_pwr);
3215	if (adapter->pcix_82544)
3216		count += nr_frags;
3217
3218	/* need: count + 2 desc gap to keep tail from touching
3219	 * head, otherwise try next time
3220	 */
3221	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3222		return NETDEV_TX_BUSY;
3223
3224	if (unlikely((hw->mac_type == e1000_82547) &&
3225		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3226		netif_stop_queue(netdev);
3227		if (!test_bit(__E1000_DOWN, &adapter->flags))
3228			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3229		return NETDEV_TX_BUSY;
3230	}
3231
3232	if (skb_vlan_tag_present(skb)) {
3233		tx_flags |= E1000_TX_FLAGS_VLAN;
3234		tx_flags |= (skb_vlan_tag_get(skb) <<
3235			     E1000_TX_FLAGS_VLAN_SHIFT);
3236	}
3237
3238	first = tx_ring->next_to_use;
3239
3240	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3241	if (tso < 0) {
3242		dev_kfree_skb_any(skb);
3243		return NETDEV_TX_OK;
3244	}
3245
3246	if (likely(tso)) {
3247		if (likely(hw->mac_type != e1000_82544))
3248			tx_ring->last_tx_tso = true;
3249		tx_flags |= E1000_TX_FLAGS_TSO;
3250	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3251		tx_flags |= E1000_TX_FLAGS_CSUM;
3252
3253	if (protocol == htons(ETH_P_IP))
3254		tx_flags |= E1000_TX_FLAGS_IPV4;
3255
3256	if (unlikely(skb->no_fcs))
3257		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3258
3259	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3260			     nr_frags, mss);
3261
3262	if (count) {
3263		/* The number of descriptors needed is higher than in other
3264		 * Intel drivers due to a number of workarounds.  The breakdown is below:
3265		 * Data descriptors: MAX_SKB_FRAGS + 1
3266		 * Context Descriptor: 1
3267		 * Keep head from touching tail: 2
3268		 * Workarounds: 3
3269		 */
3270		int desc_needed = MAX_SKB_FRAGS + 7;
3271
3272		netdev_sent_queue(netdev, skb->len);
3273		skb_tx_timestamp(skb);
3274
3275		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3276
3277		/* 82544 potentially requires twice as many data descriptors
3278		 * in order to guarantee buffers don't end on evenly-aligned
3279		 * dwords
3280		 */
3281		if (adapter->pcix_82544)
3282			desc_needed += MAX_SKB_FRAGS + 1;
3283
3284		/* Make sure there is space in the ring for the next send. */
3285		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3286
3287		if (!skb->xmit_more ||
3288		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3289			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3290			/* we need this if more than one processor can write to
3291			 * our tail at a time; it synchronizes IO on IA64/Altix
3292			 * systems
3293			 */
3294			mmiowb();
3295		}
3296	} else {
3297		dev_kfree_skb_any(skb);
3298		tx_ring->buffer_info[first].time_stamp = 0;
3299		tx_ring->next_to_use = first;
3300	}
3301
3302	return NETDEV_TX_OK;
3303}
3304
3305#define NUM_REGS 38 /* 1 based count */
3306static void e1000_regdump(struct e1000_adapter *adapter)
3307{
3308	struct e1000_hw *hw = &adapter->hw;
3309	u32 regs[NUM_REGS];
3310	u32 *regs_buff = regs;
3311	int i = 0;
3312
3313	static const char * const reg_name[] = {
3314		"CTRL",  "STATUS",
3315		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3316		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3317		"TIDV", "TXDCTL", "TADV", "TARC0",
3318		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3319		"TXDCTL1", "TARC1",
3320		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3321		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3322		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3323	};
3324
3325	regs_buff[0]  = er32(CTRL);
3326	regs_buff[1]  = er32(STATUS);
3327
3328	regs_buff[2]  = er32(RCTL);
3329	regs_buff[3]  = er32(RDLEN);
3330	regs_buff[4]  = er32(RDH);
3331	regs_buff[5]  = er32(RDT);
3332	regs_buff[6]  = er32(RDTR);
3333
3334	regs_buff[7]  = er32(TCTL);
3335	regs_buff[8]  = er32(TDBAL);
3336	regs_buff[9]  = er32(TDBAH);
3337	regs_buff[10] = er32(TDLEN);
3338	regs_buff[11] = er32(TDH);
3339	regs_buff[12] = er32(TDT);
3340	regs_buff[13] = er32(TIDV);
3341	regs_buff[14] = er32(TXDCTL);
3342	regs_buff[15] = er32(TADV);
3343	regs_buff[16] = er32(TARC0);
3344
3345	regs_buff[17] = er32(TDBAL1);
3346	regs_buff[18] = er32(TDBAH1);
3347	regs_buff[19] = er32(TDLEN1);
3348	regs_buff[20] = er32(TDH1);
3349	regs_buff[21] = er32(TDT1);
3350	regs_buff[22] = er32(TXDCTL1);
3351	regs_buff[23] = er32(TARC1);
3352	regs_buff[24] = er32(CTRL_EXT);
3353	regs_buff[25] = er32(ERT);
3354	regs_buff[26] = er32(RDBAL0);
3355	regs_buff[27] = er32(RDBAH0);
3356	regs_buff[28] = er32(TDFH);
3357	regs_buff[29] = er32(TDFT);
3358	regs_buff[30] = er32(TDFHS);
3359	regs_buff[31] = er32(TDFTS);
3360	regs_buff[32] = er32(TDFPC);
3361	regs_buff[33] = er32(RDFH);
3362	regs_buff[34] = er32(RDFT);
3363	regs_buff[35] = er32(RDFHS);
3364	regs_buff[36] = er32(RDFTS);
3365	regs_buff[37] = er32(RDFPC);
3366
3367	pr_info("Register dump\n");
3368	for (i = 0; i < NUM_REGS; i++)
3369		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3370}
3371
3372/*
3373 * e1000_dump: Print registers, tx ring and rx ring
3374 */
3375static void e1000_dump(struct e1000_adapter *adapter)
3376{
3377	/* this code doesn't handle multiple rings */
3378	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3379	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3380	int i;
3381
3382	if (!netif_msg_hw(adapter))
3383		return;
3384
3385	/* Print Registers */
3386	e1000_regdump(adapter);
3387
3388	/* transmit dump */
3389	pr_info("TX Desc ring0 dump\n");
3390
3391	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3392	 *
3393	 * Legacy Transmit Descriptor
3394	 *   +--------------------------------------------------------------+
3395	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3396	 *   +--------------------------------------------------------------+
3397	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3398	 *   +--------------------------------------------------------------+
3399	 *   63       48 47        36 35    32 31     24 23    16 15        0
3400	 *
3401	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3402	 *   63      48 47    40 39       32 31             16 15    8 7      0
3403	 *   +----------------------------------------------------------------+
3404	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3405	 *   +----------------------------------------------------------------+
3406	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3407	 *   +----------------------------------------------------------------+
3408	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3409	 *
3410	 * Extended Data Descriptor (DTYP=0x1)
3411	 *   +----------------------------------------------------------------+
3412	 * 0 |                     Buffer Address [63:0]                      |
3413	 *   +----------------------------------------------------------------+
3414	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3415	 *   +----------------------------------------------------------------+
3416	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3417	 */
3418	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3419	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3420
3421	if (!netif_msg_tx_done(adapter))
3422		goto rx_ring_summary;
3423
3424	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3425		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3426		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3427		struct my_u { __le64 a; __le64 b; };
3428		struct my_u *u = (struct my_u *)tx_desc;
3429		const char *type;
3430
3431		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3432			type = "NTC/U";
3433		else if (i == tx_ring->next_to_use)
3434			type = "NTU";
3435		else if (i == tx_ring->next_to_clean)
3436			type = "NTC";
3437		else
3438			type = "";
3439
3440		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3441			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3442			le64_to_cpu(u->a), le64_to_cpu(u->b),
3443			(u64)buffer_info->dma, buffer_info->length,
3444			buffer_info->next_to_watch,
3445			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3446	}
3447
3448rx_ring_summary:
3449	/* receive dump */
3450	pr_info("\nRX Desc ring dump\n");
3451
3452	/* Legacy Receive Descriptor Format
3453	 *
3454	 * +-----------------------------------------------------+
3455	 * |                Buffer Address [63:0]                |
3456	 * +-----------------------------------------------------+
3457	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3458	 * +-----------------------------------------------------+
3459	 * 63       48 47    40 39      32 31         16 15      0
3460	 */
3461	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3462
3463	if (!netif_msg_rx_status(adapter))
3464		goto exit;
3465
3466	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3467		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3468		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3469		struct my_u { __le64 a; __le64 b; };
3470		struct my_u *u = (struct my_u *)rx_desc;
3471		const char *type;
3472
3473		if (i == rx_ring->next_to_use)
3474			type = "NTU";
3475		else if (i == rx_ring->next_to_clean)
3476			type = "NTC";
3477		else
3478			type = "";
3479
3480		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3481			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3482			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3483	} /* for */
3484
3485	/* dump the descriptor caches */
3486	/* rx */
3487	pr_info("Rx descriptor cache in 64bit format\n");
3488	for (i = 0x6000; i <= 0x63FF; i += 0x10) {
3489		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3490			i,
3491			readl(adapter->hw.hw_addr + i+4),
3492			readl(adapter->hw.hw_addr + i),
3493			readl(adapter->hw.hw_addr + i+12),
3494			readl(adapter->hw.hw_addr + i+8));
3495	}
3496	/* tx */
3497	pr_info("Tx descriptor cache in 64bit format\n");
3498	for (i = 0x7000; i <= 0x73FF; i += 0x10) {
3499		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3500			i,
3501			readl(adapter->hw.hw_addr + i+4),
3502			readl(adapter->hw.hw_addr + i),
3503			readl(adapter->hw.hw_addr + i+12),
3504			readl(adapter->hw.hw_addr + i+8));
3505	}
3506exit:
3507	return;
3508}
3509
3510/**
3511 * e1000_tx_timeout - Respond to a Tx Hang
3512 * @netdev: network interface device structure
3513 **/
3514static void e1000_tx_timeout(struct net_device *netdev)
3515{
3516	struct e1000_adapter *adapter = netdev_priv(netdev);
3517
3518	/* Do the reset outside of interrupt context */
3519	adapter->tx_timeout_count++;
3520	schedule_work(&adapter->reset_task);
3521}
3522
3523static void e1000_reset_task(struct work_struct *work)
3524{
3525	struct e1000_adapter *adapter =
3526		container_of(work, struct e1000_adapter, reset_task);
3527
3528	e_err(drv, "Reset adapter\n");
3529	e1000_reinit_locked(adapter);
3530}
3531
3532/**
3533 * e1000_get_stats - Get System Network Statistics
3534 * @netdev: network interface device structure
3535 *
3536 * Returns the address of the device statistics structure.
3537 * The statistics are actually updated from the watchdog.
3538 **/
3539static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3540{
3541	/* only return the current stats */
3542	return &netdev->stats;
3543}
3544
3545/**
3546 * e1000_change_mtu - Change the Maximum Transfer Unit
3547 * @netdev: network interface device structure
3548 * @new_mtu: new value for maximum frame size
3549 *
3550 * Returns 0 on success, negative on failure
3551 **/
3552static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3553{
3554	struct e1000_adapter *adapter = netdev_priv(netdev);
3555	struct e1000_hw *hw = &adapter->hw;
3556	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
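	/* e.g. the default MTU of 1500 gives a max_frame of 1518:
	 * 1500 + 14 (Ethernet header) + 4 (FCS)
	 */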
3557
3558	/* Adapter-specific max frame size limits. */
3559	switch (hw->mac_type) {
3560	case e1000_undefined ... e1000_82542_rev2_1:
3561		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3562			e_err(probe, "Jumbo Frames not supported.\n");
3563			return -EINVAL;
3564		}
3565		break;
3566	default:
3567		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3568		break;
3569	}
3570
3571	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3572		msleep(1);
3573	/* e1000_down has a dependency on max_frame_size */
3574	hw->max_frame_size = max_frame;
3575	if (netif_running(netdev)) {
3576		/* prevent buffers from being reallocated */
3577		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3578		e1000_down(adapter);
3579	}
3580
3581	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3582	 * means we reserve 2 more, this pushes us to allocate from the next
3583	 * larger slab size.
3584	 * i.e. RXBUFFER_2048 --> size-4096 slab
3585	 * however with the new *_jumbo_rx* routines, jumbo receives will use
3586	 * fragmented skbs
3587	 */
3588
3589	if (max_frame <= E1000_RXBUFFER_2048)
3590		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3591	else
3592#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3593		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3594#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3595		adapter->rx_buffer_len = PAGE_SIZE;
3596#endif
3597
3598	/* adjust allocation if LPE protects us, and we aren't using SBP */
3599	if (!hw->tbi_compatibility_on &&
3600	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3601	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3602		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3603
3604	pr_info("%s changing MTU from %d to %d\n",
3605		netdev->name, netdev->mtu, new_mtu);
3606	netdev->mtu = new_mtu;
3607
3608	if (netif_running(netdev))
3609		e1000_up(adapter);
3610	else
3611		e1000_reset(adapter);
3612
3613	clear_bit(__E1000_RESETTING, &adapter->flags);
3614
3615	return 0;
3616}
3617
3618/**
3619 * e1000_update_stats - Update the board statistics counters
3620 * @adapter: board private structure
3621 **/
3622void e1000_update_stats(struct e1000_adapter *adapter)
3623{
3624	struct net_device *netdev = adapter->netdev;
3625	struct e1000_hw *hw = &adapter->hw;
3626	struct pci_dev *pdev = adapter->pdev;
3627	unsigned long flags;
3628	u16 phy_tmp;
3629
3630#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3631
3632	/* Prevent stats update while adapter is being reset, or if the pci
3633	 * connection is down.
3634	 */
3635	if (adapter->link_speed == 0)
3636		return;
3637	if (pci_channel_offline(pdev))
3638		return;
3639
3640	spin_lock_irqsave(&adapter->stats_lock, flags);
3641
3642	/* these counters are modified from e1000_tbi_adjust_stats,
3643	 * called from the interrupt context, so they must only
3644	 * be written while holding adapter->stats_lock
3645	 */
3646
3647	adapter->stats.crcerrs += er32(CRCERRS);
3648	adapter->stats.gprc += er32(GPRC);
3649	adapter->stats.gorcl += er32(GORCL);
3650	adapter->stats.gorch += er32(GORCH);
3651	adapter->stats.bprc += er32(BPRC);
3652	adapter->stats.mprc += er32(MPRC);
3653	adapter->stats.roc += er32(ROC);
3654
3655	adapter->stats.prc64 += er32(PRC64);
3656	adapter->stats.prc127 += er32(PRC127);
3657	adapter->stats.prc255 += er32(PRC255);
3658	adapter->stats.prc511 += er32(PRC511);
3659	adapter->stats.prc1023 += er32(PRC1023);
3660	adapter->stats.prc1522 += er32(PRC1522);
3661
3662	adapter->stats.symerrs += er32(SYMERRS);
3663	adapter->stats.mpc += er32(MPC);
3664	adapter->stats.scc += er32(SCC);
3665	adapter->stats.ecol += er32(ECOL);
3666	adapter->stats.mcc += er32(MCC);
3667	adapter->stats.latecol += er32(LATECOL);
3668	adapter->stats.dc += er32(DC);
3669	adapter->stats.sec += er32(SEC);
3670	adapter->stats.rlec += er32(RLEC);
3671	adapter->stats.xonrxc += er32(XONRXC);
3672	adapter->stats.xontxc += er32(XONTXC);
3673	adapter->stats.xoffrxc += er32(XOFFRXC);
3674	adapter->stats.xofftxc += er32(XOFFTXC);
3675	adapter->stats.fcruc += er32(FCRUC);
3676	adapter->stats.gptc += er32(GPTC);
3677	adapter->stats.gotcl += er32(GOTCL);
3678	adapter->stats.gotch += er32(GOTCH);
3679	adapter->stats.rnbc += er32(RNBC);
3680	adapter->stats.ruc += er32(RUC);
3681	adapter->stats.rfc += er32(RFC);
3682	adapter->stats.rjc += er32(RJC);
3683	adapter->stats.torl += er32(TORL);
3684	adapter->stats.torh += er32(TORH);
3685	adapter->stats.totl += er32(TOTL);
3686	adapter->stats.toth += er32(TOTH);
3687	adapter->stats.tpr += er32(TPR);
3688
3689	adapter->stats.ptc64 += er32(PTC64);
3690	adapter->stats.ptc127 += er32(PTC127);
3691	adapter->stats.ptc255 += er32(PTC255);
3692	adapter->stats.ptc511 += er32(PTC511);
3693	adapter->stats.ptc1023 += er32(PTC1023);
3694	adapter->stats.ptc1522 += er32(PTC1522);
3695
3696	adapter->stats.mptc += er32(MPTC);
3697	adapter->stats.bptc += er32(BPTC);
3698
3699	/* used for adaptive IFS */
3700
3701	hw->tx_packet_delta = er32(TPT);
3702	adapter->stats.tpt += hw->tx_packet_delta;
3703	hw->collision_delta = er32(COLC);
3704	adapter->stats.colc += hw->collision_delta;
3705
3706	if (hw->mac_type >= e1000_82543) {
3707		adapter->stats.algnerrc += er32(ALGNERRC);
3708		adapter->stats.rxerrc += er32(RXERRC);
3709		adapter->stats.tncrs += er32(TNCRS);
3710		adapter->stats.cexterr += er32(CEXTERR);
3711		adapter->stats.tsctc += er32(TSCTC);
3712		adapter->stats.tsctfc += er32(TSCTFC);
3713	}
3714
3715	/* Fill out the OS statistics structure */
3716	netdev->stats.multicast = adapter->stats.mprc;
3717	netdev->stats.collisions = adapter->stats.colc;
3718
3719	/* Rx Errors */
3720
3721	/* RLEC on some newer hardware can be incorrect so build
3722	 * our own version based on RUC and ROC
3723	 */
3724	netdev->stats.rx_errors = adapter->stats.rxerrc +
3725		adapter->stats.crcerrs + adapter->stats.algnerrc +
3726		adapter->stats.ruc + adapter->stats.roc +
3727		adapter->stats.cexterr;
3728	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3729	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3730	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3731	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3732	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3733
3734	/* Tx Errors */
3735	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3736	netdev->stats.tx_errors = adapter->stats.txerrc;
3737	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3738	netdev->stats.tx_window_errors = adapter->stats.latecol;
3739	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3740	if (hw->bad_tx_carr_stats_fd &&
3741	    adapter->link_duplex == FULL_DUPLEX) {
3742		netdev->stats.tx_carrier_errors = 0;
3743		adapter->stats.tncrs = 0;
3744	}
3745
3746	/* Tx Dropped needs to be maintained elsewhere */
3747
3748	/* Phy Stats */
3749	if (hw->media_type == e1000_media_type_copper) {
3750		if ((adapter->link_speed == SPEED_1000) &&
3751		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3752			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3753			adapter->phy_stats.idle_errors += phy_tmp;
3754		}
3755
3756		if ((hw->mac_type <= e1000_82546) &&
3757		   (hw->phy_type == e1000_phy_m88) &&
3758		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3759			adapter->phy_stats.receive_errors += phy_tmp;
3760	}
3761
3762	/* Management Stats */
3763	if (hw->has_smbus) {
3764		adapter->stats.mgptc += er32(MGTPTC);
3765		adapter->stats.mgprc += er32(MGTPRC);
3766		adapter->stats.mgpdc += er32(MGTPDC);
3767	}
3768
3769	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3770}
3771
3772/**
3773 * e1000_intr - Interrupt Handler
3774 * @irq: interrupt number
3775 * @data: pointer to a network interface device structure
3776 **/
3777static irqreturn_t e1000_intr(int irq, void *data)
3778{
3779	struct net_device *netdev = data;
3780	struct e1000_adapter *adapter = netdev_priv(netdev);
3781	struct e1000_hw *hw = &adapter->hw;
3782	u32 icr = er32(ICR);
3783
3784	if (unlikely(!icr))
3785		return IRQ_NONE;  /* Not our interrupt */
3786
3787	/* we might have caused the interrupt, but the above
3788	 * read cleared it, and just in case the driver is
3789	 * down there is nothing to do so return handled
3790	 */
3791	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3792		return IRQ_HANDLED;
3793
3794	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3795		hw->get_link_status = 1;
3796		/* guard against interrupt when we're going down */
3797		if (!test_bit(__E1000_DOWN, &adapter->flags))
3798			schedule_delayed_work(&adapter->watchdog_task, 1);
3799	}
3800
3801	/* disable interrupts, without the synchronize_irq bit */
3802	ew32(IMC, ~0);
3803	E1000_WRITE_FLUSH();
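	/* interrupts stay masked until the NAPI poll (e1000_clean) completes
	 * and re-enables them via e1000_irq_enable()
	 */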
3804
3805	if (likely(napi_schedule_prep(&adapter->napi))) {
3806		adapter->total_tx_bytes = 0;
3807		adapter->total_tx_packets = 0;
3808		adapter->total_rx_bytes = 0;
3809		adapter->total_rx_packets = 0;
3810		__napi_schedule(&adapter->napi);
3811	} else {
3812		/* this really should not happen! if it does it is basically a
3813		 * bug, but not a hard error, so enable ints and continue
3814		 */
3815		if (!test_bit(__E1000_DOWN, &adapter->flags))
3816			e1000_irq_enable(adapter);
3817	}
3818
3819	return IRQ_HANDLED;
3820}
3821
3822/**
3823 * e1000_clean - NAPI Rx polling callback
3824 * @napi: napi struct containing references to driver info
 * @budget: amount of work driver is allowed to do this pass, in packets
3825 **/
3826static int e1000_clean(struct napi_struct *napi, int budget)
3827{
3828	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3829						     napi);
3830	int tx_clean_complete = 0, work_done = 0;
3831
3832	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3833
3834	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3835
3836	if (!tx_clean_complete)
3837		work_done = budget;
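	/* claiming the whole budget keeps this NAPI instance in polling mode
	 * until the Tx ring has also been fully cleaned
	 */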
3838
3839	/* If budget not fully consumed, exit the polling mode */
3840	if (work_done < budget) {
3841		if (likely(adapter->itr_setting & 3))
3842			e1000_set_itr(adapter);
3843		napi_complete_done(napi, work_done);
3844		if (!test_bit(__E1000_DOWN, &adapter->flags))
3845			e1000_irq_enable(adapter);
3846	}
3847
3848	return work_done;
3849}
3850
3851/**
3852 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3853 * @adapter: board private structure
 * @tx_ring: ring to clean
3854 **/
3855static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3856			       struct e1000_tx_ring *tx_ring)
3857{
3858	struct e1000_hw *hw = &adapter->hw;
3859	struct net_device *netdev = adapter->netdev;
3860	struct e1000_tx_desc *tx_desc, *eop_desc;
3861	struct e1000_tx_buffer *buffer_info;
3862	unsigned int i, eop;
3863	unsigned int count = 0;
3864	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3865	unsigned int bytes_compl = 0, pkts_compl = 0;
3866
3867	i = tx_ring->next_to_clean;
3868	eop = tx_ring->buffer_info[i].next_to_watch;
3869	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870
3871	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3872	       (count < tx_ring->count)) {
3873		bool cleaned = false;
3874		dma_rmb();	/* read buffer_info after eop_desc */
3875		for ( ; !cleaned; count++) {
3876			tx_desc = E1000_TX_DESC(*tx_ring, i);
3877			buffer_info = &tx_ring->buffer_info[i];
3878			cleaned = (i == eop);
3879
3880			if (cleaned) {
3881				total_tx_packets += buffer_info->segs;
3882				total_tx_bytes += buffer_info->bytecount;
3883				if (buffer_info->skb) {
3884					bytes_compl += buffer_info->skb->len;
3885					pkts_compl++;
3886				}
3887
3888			}
3889			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3890			tx_desc->upper.data = 0;
3891
3892			if (unlikely(++i == tx_ring->count))
3893				i = 0;
3894		}
3895
3896		eop = tx_ring->buffer_info[i].next_to_watch;
3897		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3898	}
3899
3900	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3901	 * which will reuse the cleaned buffers.
3902	 */
3903	smp_store_release(&tx_ring->next_to_clean, i);
3904
3905	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3906
3907#define TX_WAKE_THRESHOLD 32
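	/* only restart the queue once a reasonable number of descriptors are
	 * free again, to avoid waking it just to stop on the very next frame
	 */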
3908	if (unlikely(count && netif_carrier_ok(netdev) &&
3909		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3910		/* Make sure that anybody stopping the queue after this
3911		 * sees the new next_to_clean.
3912		 */
3913		smp_mb();
3914
3915		if (netif_queue_stopped(netdev) &&
3916		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3917			netif_wake_queue(netdev);
3918			++adapter->restart_queue;
3919		}
3920	}
3921
3922	if (adapter->detect_tx_hung) {
3923		/* Detect a transmit hang in hardware, this serializes the
3924		 * check with the clearing of time_stamp and movement of i
3925		 */
3926		adapter->detect_tx_hung = false;
3927		if (tx_ring->buffer_info[eop].time_stamp &&
3928		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3929			       (adapter->tx_timeout_factor * HZ)) &&
3930		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3931
3932			/* detected Tx unit hang */
3933			e_err(drv, "Detected Tx Unit Hang\n"
3934			      "  Tx Queue             <%lu>\n"
3935			      "  TDH                  <%x>\n"
3936			      "  TDT                  <%x>\n"
3937			      "  next_to_use          <%x>\n"
3938			      "  next_to_clean        <%x>\n"
3939			      "buffer_info[next_to_clean]\n"
3940			      "  time_stamp           <%lx>\n"
3941			      "  next_to_watch        <%x>\n"
3942			      "  jiffies              <%lx>\n"
3943			      "  next_to_watch.status <%x>\n",
3944				(unsigned long)(tx_ring - adapter->tx_ring),
3945				readl(hw->hw_addr + tx_ring->tdh),
3946				readl(hw->hw_addr + tx_ring->tdt),
3947				tx_ring->next_to_use,
3948				tx_ring->next_to_clean,
3949				tx_ring->buffer_info[eop].time_stamp,
3950				eop,
3951				jiffies,
3952				eop_desc->upper.fields.status);
3953			e1000_dump(adapter);
3954			netif_stop_queue(netdev);
3955		}
3956	}
3957	adapter->total_tx_bytes += total_tx_bytes;
3958	adapter->total_tx_packets += total_tx_packets;
3959	netdev->stats.tx_bytes += total_tx_bytes;
3960	netdev->stats.tx_packets += total_tx_packets;
3961	return count < tx_ring->count;
3962}
3963
3964/**
3965 * e1000_rx_checksum - Receive Checksum Offload for 82543
3966 * @adapter:     board private structure
3967 * @status_err:  receive descriptor status and error fields
3968 * @csum:        receive descriptor csum field
3969 * @skb:         socket buffer with received data
3970 **/
3971static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3972			      u32 csum, struct sk_buff *skb)
3973{
3974	struct e1000_hw *hw = &adapter->hw;
3975	u16 status = (u16)status_err;
3976	u8 errors = (u8)(status_err >> 24);
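	/* callers pack the descriptor status byte into the low bits of
	 * status_err and the error byte into bits 31:24 (see the Rx clean
	 * routines)
	 */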
3977
3978	skb_checksum_none_assert(skb);
3979
3980	/* 82543 or newer only */
3981	if (unlikely(hw->mac_type < e1000_82543))
3982		return;
3983	/* Ignore Checksum bit is set */
3984	if (unlikely(status & E1000_RXD_STAT_IXSM))
3985		return;
3986	/* TCP/UDP checksum error bit is set */
3987	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3988		/* let the stack verify checksum errors */
3989		adapter->hw_csum_err++;
3990		return;
3991	}
3992	/* TCP/UDP Checksum has not been calculated */
3993	if (!(status & E1000_RXD_STAT_TCPCS))
3994		return;
3995
3996	/* It must be a TCP or UDP packet with a valid checksum */
3997	if (likely(status & E1000_RXD_STAT_TCPCS)) {
3998		/* TCP checksum is good */
3999		skb->ip_summed = CHECKSUM_UNNECESSARY;
4000	}
4001	adapter->hw_csum_good++;
4002}
4003
4004/**
4005 * e1000_consume_page - helper function for jumbo Rx path
 * @bi: software descriptor shadow data
 * @skb: skb being modified
 * @length: length of data being added
4006 **/
4007static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4008			       u16 length)
4009{
4010	bi->rxbuf.page = NULL;
4011	skb->len += length;
4012	skb->data_len += length;
4013	skb->truesize += PAGE_SIZE;
4014}
4015
4016/**
4017 * e1000_receive_skb - helper function to handle rx indications
4018 * @adapter: board private structure
4019 * @status: descriptor status field as written by hardware
4020 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4021 * @skb: pointer to sk_buff to be indicated to stack
4022 */
4023static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4024			      __le16 vlan, struct sk_buff *skb)
4025{
4026	skb->protocol = eth_type_trans(skb, adapter->netdev);
4027
4028	if (status & E1000_RXD_STAT_VP) {
4029		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4030
4031		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4032	}
4033	napi_gro_receive(&adapter->napi, skb);
4034}
4035
4036/**
4037 * e1000_tbi_adjust_stats
4038 * @hw: Struct containing variables accessed by shared code
 * @stats: Struct containing statistic register values
4039 * @frame_len: The length of the frame in question
4040 * @mac_addr: The Ethernet destination address of the frame in question
4041 *
4042 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4043 */
4044static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4045				   struct e1000_hw_stats *stats,
4046				   u32 frame_len, const u8 *mac_addr)
4047{
4048	u64 carry_bit;
4049
4050	/* First adjust the frame length. */
4051	frame_len--;
4052	/* We need to adjust the statistics counters, since the hardware
4053	 * counters overcount this packet as a CRC error and undercount
4054	 * the packet as a good packet
4055	 */
4056	/* This packet should not be counted as a CRC error. */
4057	stats->crcerrs--;
4058	/* This packet does count as a Good Packet Received. */
4059	stats->gprc++;
4060
4061	/* Adjust the Good Octets received counters */
4062	carry_bit = 0x80000000 & stats->gorcl;
4063	stats->gorcl += frame_len;
4064	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4065	 * Received Count) was one before the addition,
4066	 * AND it is zero after, then we lost the carry out,
4067	 * need to add one to Gorch (Good Octets Received Count High).
4068	 * This could be simplified if all environments supported
4069	 * 64-bit integers.
4070	 */
4071	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4072		stats->gorch++;
4073	/* Is this a broadcast or multicast?  Check broadcast first,
4074	 * since the test for a multicast frame will test positive on
4075	 * a broadcast frame.
4076	 */
4077	if (is_broadcast_ether_addr(mac_addr))
4078		stats->bprc++;
4079	else if (is_multicast_ether_addr(mac_addr))
4080		stats->mprc++;
4081
4082	if (frame_len == hw->max_frame_size) {
4083		/* In this case, the hardware has overcounted the number of
4084		 * oversize frames.
4085		 */
4086		if (stats->roc > 0)
4087			stats->roc--;
4088	}
4089
4090	/* Adjust the bin counters when the extra byte put the frame in the
4091	 * wrong bin. Remember that the frame_len was adjusted above.
4092	 */
4093	if (frame_len == 64) {
4094		stats->prc64++;
4095		stats->prc127--;
4096	} else if (frame_len == 127) {
4097		stats->prc127++;
4098		stats->prc255--;
4099	} else if (frame_len == 255) {
4100		stats->prc255++;
4101		stats->prc511--;
4102	} else if (frame_len == 511) {
4103		stats->prc511++;
4104		stats->prc1023--;
4105	} else if (frame_len == 1023) {
4106		stats->prc1023++;
4107		stats->prc1522--;
4108	} else if (frame_len == 1522) {
4109		stats->prc1522++;
4110	}
4111}
4112
4113static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4114				    u8 status, u8 errors,
4115				    u32 length, const u8 *data)
4116{
4117	struct e1000_hw *hw = &adapter->hw;
4118	u8 last_byte = *(data + length - 1);
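	/* TBI workaround: on fiber (TBI) links the hardware can mark an
	 * otherwise valid frame as a CRC error when it ends in a carrier
	 * extension symbol; TBI_ACCEPT() re-checks such frames so they can
	 * be accepted, and the hardware counters are corrected below
	 */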
4119
4120	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4121		unsigned long irq_flags;
4122
4123		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4124		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4125		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4126
4127		return true;
4128	}
4129
4130	return false;
4131}
4132
4133static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4134					  unsigned int bufsz)
4135{
4136	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4137
4138	if (unlikely(!skb))
4139		adapter->alloc_rx_buff_failed++;
4140	return skb;
4141}
4142
4143/**
4144 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4145 * @adapter: board private structure
4146 * @rx_ring: ring to clean
4147 * @work_done: amount of napi work completed this call
4148 * @work_to_do: max amount of work allowed for this call to do
4149 *
4150 * the return value indicates whether actual cleaning was done, there
4151 * is no guarantee that everything was cleaned
4152 */
4153static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4154				     struct e1000_rx_ring *rx_ring,
4155				     int *work_done, int work_to_do)
4156{
4157	struct net_device *netdev = adapter->netdev;
4158	struct pci_dev *pdev = adapter->pdev;
4159	struct e1000_rx_desc *rx_desc, *next_rxd;
4160	struct e1000_rx_buffer *buffer_info, *next_buffer;
4161	u32 length;
4162	unsigned int i;
4163	int cleaned_count = 0;
4164	bool cleaned = false;
4165	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4166
4167	i = rx_ring->next_to_clean;
4168	rx_desc = E1000_RX_DESC(*rx_ring, i);
4169	buffer_info = &rx_ring->buffer_info[i];
4170
4171	while (rx_desc->status & E1000_RXD_STAT_DD) {
4172		struct sk_buff *skb;
4173		u8 status;
4174
4175		if (*work_done >= work_to_do)
4176			break;
4177		(*work_done)++;
4178		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4179
4180		status = rx_desc->status;
4181
4182		if (++i == rx_ring->count)
4183			i = 0;
4184
4185		next_rxd = E1000_RX_DESC(*rx_ring, i);
4186		prefetch(next_rxd);
4187
4188		next_buffer = &rx_ring->buffer_info[i];
4189
4190		cleaned = true;
4191		cleaned_count++;
4192		dma_unmap_page(&pdev->dev, buffer_info->dma,
4193			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4194		buffer_info->dma = 0;
4195
4196		length = le16_to_cpu(rx_desc->length);
4197
4198		/* errors is only valid for DD + EOP descriptors */
4199		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4200		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4201			u8 *mapped = page_address(buffer_info->rxbuf.page);
4202
4203			if (e1000_tbi_should_accept(adapter, status,
4204						    rx_desc->errors,
4205						    length, mapped)) {
4206				length--;
4207			} else if (netdev->features & NETIF_F_RXALL) {
4208				goto process_skb;
4209			} else {
4210				/* an error means any chain goes out the window
4211				 * too
4212				 */
4213				if (rx_ring->rx_skb_top)
4214					dev_kfree_skb(rx_ring->rx_skb_top);
4215				rx_ring->rx_skb_top = NULL;
4216				goto next_desc;
4217			}
4218		}
4219
4220#define rxtop rx_ring->rx_skb_top
4221process_skb:
4222		if (!(status & E1000_RXD_STAT_EOP)) {
4223			/* this descriptor is only the beginning (or middle) */
4224			if (!rxtop) {
4225				/* this is the beginning of a chain */
4226				rxtop = napi_get_frags(&adapter->napi);
4227				if (!rxtop)
4228					break;
4229
4230				skb_fill_page_desc(rxtop, 0,
4231						   buffer_info->rxbuf.page,
4232						   0, length);
4233			} else {
4234				/* this is the middle of a chain */
4235				skb_fill_page_desc(rxtop,
4236				    skb_shinfo(rxtop)->nr_frags,
4237				    buffer_info->rxbuf.page, 0, length);
4238			}
4239			e1000_consume_page(buffer_info, rxtop, length);
4240			goto next_desc;
4241		} else {
4242			if (rxtop) {
4243				/* end of the chain */
4244				skb_fill_page_desc(rxtop,
4245				    skb_shinfo(rxtop)->nr_frags,
4246				    buffer_info->rxbuf.page, 0, length);
4247				skb = rxtop;
4248				rxtop = NULL;
4249				e1000_consume_page(buffer_info, skb, length);
4250			} else {
4251				struct page *p;
4252				/* no chain, got EOP, this buf is the packet
4253				 * copybreak to save the put_page/alloc_page
4254				 */
4255				p = buffer_info->rxbuf.page;
4256				if (length <= copybreak) {
4257					u8 *vaddr;
4258
4259					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4260						length -= 4;
4261					skb = e1000_alloc_rx_skb(adapter,
4262								 length);
4263					if (!skb)
4264						break;
4265
4266					vaddr = kmap_atomic(p);
4267					memcpy(skb_tail_pointer(skb), vaddr,
4268					       length);
4269					kunmap_atomic(vaddr);
4270					/* re-use the page, so don't erase
4271					 * buffer_info->rxbuf.page
4272					 */
4273					skb_put(skb, length);
4274					e1000_rx_checksum(adapter,
4275							  status | rx_desc->errors << 24,
4276							  le16_to_cpu(rx_desc->csum), skb);
4277
4278					total_rx_bytes += skb->len;
4279					total_rx_packets++;
4280
4281					e1000_receive_skb(adapter, status,
4282							  rx_desc->special, skb);
4283					goto next_desc;
4284				} else {
4285					skb = napi_get_frags(&adapter->napi);
4286					if (!skb) {
4287						adapter->alloc_rx_buff_failed++;
4288						break;
4289					}
4290					skb_fill_page_desc(skb, 0, p, 0,
4291							   length);
4292					e1000_consume_page(buffer_info, skb,
4293							   length);
4294				}
4295			}
4296		}
4297
4298		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4299		e1000_rx_checksum(adapter,
4300				  (u32)(status) |
4301				  ((u32)(rx_desc->errors) << 24),
4302				  le16_to_cpu(rx_desc->csum), skb);
4303
4304		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4305		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4306			pskb_trim(skb, skb->len - 4);
4307		total_rx_packets++;
4308
4309		if (status & E1000_RXD_STAT_VP) {
4310			__le16 vlan = rx_desc->special;
4311			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4312
4313			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4314		}
4315
4316		napi_gro_frags(&adapter->napi);
4317
4318next_desc:
4319		rx_desc->status = 0;
4320
4321		/* return some buffers to hardware, one at a time is too slow */
4322		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4323			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4324			cleaned_count = 0;
4325		}
4326
4327		/* use prefetched values */
4328		rx_desc = next_rxd;
4329		buffer_info = next_buffer;
4330	}
4331	rx_ring->next_to_clean = i;
4332
4333	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4334	if (cleaned_count)
4335		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4336
4337	adapter->total_rx_packets += total_rx_packets;
4338	adapter->total_rx_bytes += total_rx_bytes;
4339	netdev->stats.rx_bytes += total_rx_bytes;
4340	netdev->stats.rx_packets += total_rx_packets;
4341	return cleaned;
4342}
4343
4344/* this should improve performance for small packets with large amounts
4345 * of reassembly being done in the stack
4346 */
4347static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4348				       struct e1000_rx_buffer *buffer_info,
4349				       u32 length, const void *data)
4350{
4351	struct sk_buff *skb;
4352
4353	if (length > copybreak)
4354		return NULL;
4355
4356	skb = e1000_alloc_rx_skb(adapter, length);
4357	if (!skb)
4358		return NULL;
4359
4360	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4361				length, DMA_FROM_DEVICE);
4362
4363	memcpy(skb_put(skb, length), data, length);
4364
4365	return skb;
4366}
4367
4368/**
4369 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4370 * @adapter: board private structure
4371 * @rx_ring: ring to clean
4372 * @work_done: amount of napi work completed this call
4373 * @work_to_do: max amount of work allowed for this call to do
4374 */
4375static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4376			       struct e1000_rx_ring *rx_ring,
4377			       int *work_done, int work_to_do)
4378{
4379	struct net_device *netdev = adapter->netdev;
4380	struct pci_dev *pdev = adapter->pdev;
4381	struct e1000_rx_desc *rx_desc, *next_rxd;
4382	struct e1000_rx_buffer *buffer_info, *next_buffer;
4383	u32 length;
4384	unsigned int i;
4385	int cleaned_count = 0;
4386	bool cleaned = false;
4387	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4388
4389	i = rx_ring->next_to_clean;
4390	rx_desc = E1000_RX_DESC(*rx_ring, i);
4391	buffer_info = &rx_ring->buffer_info[i];
4392
4393	while (rx_desc->status & E1000_RXD_STAT_DD) {
4394		struct sk_buff *skb;
4395		u8 *data;
4396		u8 status;
4397
4398		if (*work_done >= work_to_do)
4399			break;
4400		(*work_done)++;
4401		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4402
4403		status = rx_desc->status;
4404		length = le16_to_cpu(rx_desc->length);
4405
4406		data = buffer_info->rxbuf.data;
4407		prefetch(data);
4408		skb = e1000_copybreak(adapter, buffer_info, length, data);
4409		if (!skb) {
4410			unsigned int frag_len = e1000_frag_len(adapter);
4411
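			/* frame is too large to copy: hand the receive
			 * buffer itself up the stack; build_skb() wraps the
			 * existing fragment without copying, and the headroom
			 * reserved at allocation time is restored below
			 */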
4412			skb = build_skb(data - E1000_HEADROOM, frag_len);
4413			if (!skb) {
4414				adapter->alloc_rx_buff_failed++;
4415				break;
4416			}
4417
4418			skb_reserve(skb, E1000_HEADROOM);
4419			dma_unmap_single(&pdev->dev, buffer_info->dma,
4420					 adapter->rx_buffer_len,
4421					 DMA_FROM_DEVICE);
4422			buffer_info->dma = 0;
4423			buffer_info->rxbuf.data = NULL;
4424		}
4425
4426		if (++i == rx_ring->count)
4427			i = 0;
4428
4429		next_rxd = E1000_RX_DESC(*rx_ring, i);
4430		prefetch(next_rxd);
4431
4432		next_buffer = &rx_ring->buffer_info[i];
4433
4434		cleaned = true;
4435		cleaned_count++;
4436
4437		/* !EOP means multiple descriptors were used to store a single
4438		 * packet, if that's the case we need to toss it.  In fact, we
4439		 * need to toss every packet with the EOP bit clear and the next
4440		 * frame that _does_ have the EOP bit set, as it is by
4441		 * definition only a frame fragment
4442		 */
4443		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4444			adapter->discarding = true;
4445
4446		if (adapter->discarding) {
4447			/* All receives must fit into a single buffer */
4448			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4449			dev_kfree_skb(skb);
4450			if (status & E1000_RXD_STAT_EOP)
4451				adapter->discarding = false;
4452			goto next_desc;
4453		}
4454
4455		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4456			if (e1000_tbi_should_accept(adapter, status,
4457						    rx_desc->errors,
4458						    length, data)) {
4459				length--;
4460			} else if (netdev->features & NETIF_F_RXALL) {
4461				goto process_skb;
4462			} else {
4463				dev_kfree_skb(skb);
4464				goto next_desc;
4465			}
4466		}
4467
4468process_skb:
4469		total_rx_bytes += (length - 4); /* don't count FCS */
4470		total_rx_packets++;
4471
4472		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4473			/* adjust length to remove Ethernet CRC, this must be
4474			 * done after the TBI_ACCEPT workaround above
4475			 */
4476			length -= 4;
4477
4478		if (buffer_info->rxbuf.data == NULL)
4479			skb_put(skb, length);
4480		else /* copybreak skb */
4481			skb_trim(skb, length);
4482
4483		/* Receive Checksum Offload */
4484		e1000_rx_checksum(adapter,
4485				  (u32)(status) |
4486				  ((u32)(rx_desc->errors) << 24),
4487				  le16_to_cpu(rx_desc->csum), skb);
4488
4489		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4490
4491next_desc:
4492		rx_desc->status = 0;
4493
4494		/* return some buffers to hardware, one at a time is too slow */
4495		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4496			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4497			cleaned_count = 0;
4498		}
4499
4500		/* use prefetched values */
4501		rx_desc = next_rxd;
4502		buffer_info = next_buffer;
4503	}
4504	rx_ring->next_to_clean = i;
4505
4506	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4507	if (cleaned_count)
4508		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4509
4510	adapter->total_rx_packets += total_rx_packets;
4511	adapter->total_rx_bytes += total_rx_bytes;
4512	netdev->stats.rx_bytes += total_rx_bytes;
4513	netdev->stats.rx_packets += total_rx_packets;
4514	return cleaned;
4515}
4516
4517/**
4518 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4519 * @adapter: address of board private structure
4520 * @rx_ring: pointer to receive ring structure
4521 * @cleaned_count: number of buffers to allocate this pass
4522 **/
4523static void
4524e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4525			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4526{
4527	struct pci_dev *pdev = adapter->pdev;
4528	struct e1000_rx_desc *rx_desc;
4529	struct e1000_rx_buffer *buffer_info;
4530	unsigned int i;
4531
4532	i = rx_ring->next_to_use;
4533	buffer_info = &rx_ring->buffer_info[i];
4534
4535	while (cleaned_count--) {
4536		/* allocate a new page if necessary */
4537		if (!buffer_info->rxbuf.page) {
4538			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4539			if (unlikely(!buffer_info->rxbuf.page)) {
4540				adapter->alloc_rx_buff_failed++;
4541				break;
4542			}
4543		}
4544
4545		if (!buffer_info->dma) {
4546			buffer_info->dma = dma_map_page(&pdev->dev,
4547							buffer_info->rxbuf.page, 0,
4548							adapter->rx_buffer_len,
4549							DMA_FROM_DEVICE);
4550			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4551				put_page(buffer_info->rxbuf.page);
4552				buffer_info->rxbuf.page = NULL;
4553				buffer_info->dma = 0;
4554				adapter->alloc_rx_buff_failed++;
4555				break;
4556			}
4557		}
4558
4559		rx_desc = E1000_RX_DESC(*rx_ring, i);
4560		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4561
4562		if (unlikely(++i == rx_ring->count))
4563			i = 0;
4564		buffer_info = &rx_ring->buffer_info[i];
4565	}
4566
4567	if (likely(rx_ring->next_to_use != i)) {
4568		rx_ring->next_to_use = i;
4569		if (unlikely(i-- == 0))
4570			i = (rx_ring->count - 1);
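		/* the tail is written as the index of the last initialized
		 * descriptor, one behind next_to_use, leaving one descriptor
		 * unused (reading the logic here, presumably so head == tail
		 * unambiguously means an empty ring)
		 */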
4571
4572		/* Force memory writes to complete before letting h/w
4573		 * know there are new descriptors to fetch.  (Only
4574		 * applicable for weak-ordered memory model archs,
4575		 * such as IA-64).
4576		 */
4577		wmb();
4578		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4579	}
4580}
4581
4582/**
4583 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4584 * @adapter: address of board private structure
 * @rx_ring: pointer to ring struct
 * @cleaned_count: number of new Rx buffers to try to allocate
4585 **/
4586static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4587				   struct e1000_rx_ring *rx_ring,
4588				   int cleaned_count)
4589{
4590	struct e1000_hw *hw = &adapter->hw;
4591	struct pci_dev *pdev = adapter->pdev;
4592	struct e1000_rx_desc *rx_desc;
4593	struct e1000_rx_buffer *buffer_info;
4594	unsigned int i;
4595	unsigned int bufsz = adapter->rx_buffer_len;
4596
4597	i = rx_ring->next_to_use;
4598	buffer_info = &rx_ring->buffer_info[i];
4599
4600	while (cleaned_count--) {
4601		void *data;
4602
4603		if (buffer_info->rxbuf.data)
4604			goto skip;
4605
4606		data = e1000_alloc_frag(adapter);
4607		if (!data) {
4608			/* Better luck next round */
4609			adapter->alloc_rx_buff_failed++;
4610			break;
4611		}
4612
4613		/* Fix for errata 23, can't cross 64kB boundary */
4614		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4615			void *olddata = data;
4616			e_err(rx_err, "skb align check failed: %u bytes at %p\n",
4617			      bufsz, data);
4618			/* Try again, without freeing the previous */
4619			data = e1000_alloc_frag(adapter);
4620			/* Failed allocation, critical failure */
4621			if (!data) {
4622				skb_free_frag(olddata);
4623				adapter->alloc_rx_buff_failed++;
4624				break;
4625			}
4626
4627			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4628				/* give up */
4629				skb_free_frag(data);
4630				skb_free_frag(olddata);
4631				adapter->alloc_rx_buff_failed++;
4632				break;
4633			}
4634
4635			/* Use new allocation */
4636			skb_free_frag(olddata);
4637		}
4638		buffer_info->dma = dma_map_single(&pdev->dev,
4639						  data,
4640						  adapter->rx_buffer_len,
4641						  DMA_FROM_DEVICE);
4642		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4643			skb_free_frag(data);
4644			buffer_info->dma = 0;
4645			adapter->alloc_rx_buff_failed++;
4646			break;
4647		}
4648
4649		/* XXX if it was allocated cleanly it will never map to a
4650		 * boundary crossing
4651		 */
4652
4653		/* Fix for errata 23, can't cross 64kB boundary */
4654		if (!e1000_check_64k_bound(adapter,
4655					(void *)(unsigned long)buffer_info->dma,
4656					adapter->rx_buffer_len)) {
4657			e_err(rx_err, "dma align check failed: %u bytes at %p\n",
4658			      adapter->rx_buffer_len,
4659			      (void *)(unsigned long)buffer_info->dma);
4660
4661			dma_unmap_single(&pdev->dev, buffer_info->dma,
4662					 adapter->rx_buffer_len,
4663					 DMA_FROM_DEVICE);
4664
4665			skb_free_frag(data);
4666			buffer_info->rxbuf.data = NULL;
4667			buffer_info->dma = 0;
4668
4669			adapter->alloc_rx_buff_failed++;
4670			break;
4671		}
4672		buffer_info->rxbuf.data = data;
4673 skip:
4674		rx_desc = E1000_RX_DESC(*rx_ring, i);
4675		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4676
4677		if (unlikely(++i == rx_ring->count))
4678			i = 0;
4679		buffer_info = &rx_ring->buffer_info[i];
4680	}
4681
4682	if (likely(rx_ring->next_to_use != i)) {
4683		rx_ring->next_to_use = i;
4684		if (unlikely(i-- == 0))
4685			i = (rx_ring->count - 1);
4686
4687		/* Force memory writes to complete before letting h/w
4688		 * know there are new descriptors to fetch.  (Only
4689		 * applicable for weak-ordered memory model archs,
4690		 * such as IA-64).
4691		 */
4692		wmb();
4693		writel(i, hw->hw_addr + rx_ring->rdt);
4694	}
4695}
4696
4697/**
4698 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4699 * @adapter: board private structure
4700 **/
4701static void e1000_smartspeed(struct e1000_adapter *adapter)
4702{
4703	struct e1000_hw *hw = &adapter->hw;
4704	u16 phy_status;
4705	u16 phy_ctrl;
4706
4707	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4708	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4709		return;
4710
4711	if (adapter->smartspeed == 0) {
4712		/* If Master/Slave config fault is asserted twice,
4713		 * we assume back-to-back
4714		 */
4715		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4716		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4717			return;
4718		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4719		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4720			return;
4721		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4722		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4723			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4724			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4725					    phy_ctrl);
4726			adapter->smartspeed++;
4727			if (!e1000_phy_setup_autoneg(hw) &&
4728			   !e1000_read_phy_reg(hw, PHY_CTRL,
4729					       &phy_ctrl)) {
4730				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4731					     MII_CR_RESTART_AUTO_NEG);
4732				e1000_write_phy_reg(hw, PHY_CTRL,
4733						    phy_ctrl);
4734			}
4735		}
4736		return;
4737	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4738		/* If still no link, perhaps using 2/3 pair cable */
4739		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4740		phy_ctrl |= CR_1000T_MS_ENABLE;
4741		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4742		if (!e1000_phy_setup_autoneg(hw) &&
4743		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4744			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4745				     MII_CR_RESTART_AUTO_NEG);
4746			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4747		}
4748	}
4749	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4750	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4751		adapter->smartspeed = 0;
4752}
4753
4754/**
4755 * e1000_ioctl - handle ioctl calls
4756 * @netdev: pointer to our netdev
4757 * @ifr: pointer to interface request structure
4758 * @cmd: ioctl data
4759 **/
4760static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4761{
4762	switch (cmd) {
4763	case SIOCGMIIPHY:
4764	case SIOCGMIIREG:
4765	case SIOCSMIIREG:
4766		return e1000_mii_ioctl(netdev, ifr, cmd);
4767	default:
4768		return -EOPNOTSUPP;
4769	}
4770}
4771
4772/**
4773 * e1000_mii_ioctl - perform MII related ioctl calls
4774 * @netdev: pointer to our netdev
4775 * @ifr: pointer to interface request structure
4776 * @cmd: ioctl data
4777 **/
4778static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4779			   int cmd)
4780{
4781	struct e1000_adapter *adapter = netdev_priv(netdev);
4782	struct e1000_hw *hw = &adapter->hw;
4783	struct mii_ioctl_data *data = if_mii(ifr);
4784	int retval;
4785	u16 mii_reg;
4786	unsigned long flags;
4787
4788	if (hw->media_type != e1000_media_type_copper)
4789		return -EOPNOTSUPP;
4790
4791	switch (cmd) {
4792	case SIOCGMIIPHY:
4793		data->phy_id = hw->phy_addr;
4794		break;
4795	case SIOCGMIIREG:
4796		spin_lock_irqsave(&adapter->stats_lock, flags);
4797		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4798				   &data->val_out)) {
4799			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4800			return -EIO;
4801		}
4802		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4803		break;
4804	case SIOCSMIIREG:
4805		if (data->reg_num & ~(0x1F))
4806			return -EFAULT;
4807		mii_reg = data->val_in;
4808		spin_lock_irqsave(&adapter->stats_lock, flags);
4809		if (e1000_write_phy_reg(hw, data->reg_num,
4810					mii_reg)) {
4811			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4812			return -EIO;
4813		}
4814		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4815		if (hw->media_type == e1000_media_type_copper) {
4816			switch (data->reg_num) {
4817			case PHY_CTRL:
4818				if (mii_reg & MII_CR_POWER_DOWN)
4819					break;
4820				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4821					hw->autoneg = 1;
4822					hw->autoneg_advertised = 0x2F;
4823				} else {
4824					u32 speed;
4825					if (mii_reg & 0x40)
4826						speed = SPEED_1000;
4827					else if (mii_reg & 0x2000)
4828						speed = SPEED_100;
4829					else
4830						speed = SPEED_10;
4831					retval = e1000_set_spd_dplx(
4832						adapter, speed,
4833						((mii_reg & 0x100)
4834						 ? DUPLEX_FULL :
4835						 DUPLEX_HALF));
4836					if (retval)
4837						return retval;
4838				}
4839				if (netif_running(adapter->netdev))
4840					e1000_reinit_locked(adapter);
4841				else
4842					e1000_reset(adapter);
4843				break;
4844			case M88E1000_PHY_SPEC_CTRL:
4845			case M88E1000_EXT_PHY_SPEC_CTRL:
4846				if (e1000_phy_reset(hw))
4847					return -EIO;
4848				break;
4849			}
4850		} else {
4851			switch (data->reg_num) {
4852			case PHY_CTRL:
4853				if (mii_reg & MII_CR_POWER_DOWN)
4854					break;
4855				if (netif_running(adapter->netdev))
4856					e1000_reinit_locked(adapter);
4857				else
4858					e1000_reset(adapter);
4859				break;
4860			}
4861		}
4862		break;
4863	default:
4864		return -EOPNOTSUPP;
4865	}
4866	return E1000_SUCCESS;
4867}
4868
4869void e1000_pci_set_mwi(struct e1000_hw *hw)
4870{
4871	struct e1000_adapter *adapter = hw->back;
4872	int ret_val = pci_set_mwi(adapter->pdev);
4873
4874	if (ret_val)
4875		e_err(probe, "Error in setting MWI\n");
4876}
4877
4878void e1000_pci_clear_mwi(struct e1000_hw *hw)
4879{
4880	struct e1000_adapter *adapter = hw->back;
4881
4882	pci_clear_mwi(adapter->pdev);
4883}
4884
4885int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4886{
4887	struct e1000_adapter *adapter = hw->back;
4888	return pcix_get_mmrbc(adapter->pdev);
4889}
4890
4891void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4892{
4893	struct e1000_adapter *adapter = hw->back;
4894	pcix_set_mmrbc(adapter->pdev, mmrbc);
4895}
4896
4897void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4898{
4899	outl(value, port);
4900}
4901
4902static bool e1000_vlan_used(struct e1000_adapter *adapter)
4903{
4904	u16 vid;
4905
4906	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4907		return true;
4908	return false;
4909}
4910
4911static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4912			      netdev_features_t features)
4913{
4914	struct e1000_hw *hw = &adapter->hw;
4915	u32 ctrl;
4916
4917	ctrl = er32(CTRL);
4918	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4919		/* enable VLAN tag insert/strip */
4920		ctrl |= E1000_CTRL_VME;
4921	} else {
4922		/* disable VLAN tag insert/strip */
4923		ctrl &= ~E1000_CTRL_VME;
4924	}
4925	ew32(CTRL, ctrl);
4926}
4927static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4928				     bool filter_on)
4929{
4930	struct e1000_hw *hw = &adapter->hw;
4931	u32 rctl;
4932
4933	if (!test_bit(__E1000_DOWN, &adapter->flags))
4934		e1000_irq_disable(adapter);
4935
4936	__e1000_vlan_mode(adapter, adapter->netdev->features);
4937	if (filter_on) {
4938		/* enable VLAN receive filtering */
4939		rctl = er32(RCTL);
4940		rctl &= ~E1000_RCTL_CFIEN;
4941		if (!(adapter->netdev->flags & IFF_PROMISC))
4942			rctl |= E1000_RCTL_VFE;
4943		ew32(RCTL, rctl);
4944		e1000_update_mng_vlan(adapter);
4945	} else {
4946		/* disable VLAN receive filtering */
4947		rctl = er32(RCTL);
4948		rctl &= ~E1000_RCTL_VFE;
4949		ew32(RCTL, rctl);
4950	}
4951
4952	if (!test_bit(__E1000_DOWN, &adapter->flags))
4953		e1000_irq_enable(adapter);
4954}
4955
4956static void e1000_vlan_mode(struct net_device *netdev,
4957			    netdev_features_t features)
4958{
4959	struct e1000_adapter *adapter = netdev_priv(netdev);
4960
4961	if (!test_bit(__E1000_DOWN, &adapter->flags))
4962		e1000_irq_disable(adapter);
4963
4964	__e1000_vlan_mode(adapter, features);
4965
4966	if (!test_bit(__E1000_DOWN, &adapter->flags))
4967		e1000_irq_enable(adapter);
4968}
4969
4970static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4971				 __be16 proto, u16 vid)
4972{
4973	struct e1000_adapter *adapter = netdev_priv(netdev);
4974	struct e1000_hw *hw = &adapter->hw;
4975	u32 vfta, index;
4976
4977	if ((hw->mng_cookie.status &
4978	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4979	    (vid == adapter->mng_vlan_id))
4980		return 0;
4981
4982	if (!e1000_vlan_used(adapter))
4983		e1000_vlan_filter_on_off(adapter, true);
4984
4985	/* add VID to filter table */
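	/* the VFTA is a 4096-bit bitmap spread across 128 32-bit registers:
	 * bits 11:5 of the VID select the register, bits 4:0 the bit
	 */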
4986	index = (vid >> 5) & 0x7F;
4987	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4988	vfta |= (1 << (vid & 0x1F));
4989	e1000_write_vfta(hw, index, vfta);
4990
4991	set_bit(vid, adapter->active_vlans);
4992
4993	return 0;
4994}
4995
4996static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4997				  __be16 proto, u16 vid)
4998{
4999	struct e1000_adapter *adapter = netdev_priv(netdev);
5000	struct e1000_hw *hw = &adapter->hw;
5001	u32 vfta, index;
5002
5003	if (!test_bit(__E1000_DOWN, &adapter->flags))
5004		e1000_irq_disable(adapter);
5005	if (!test_bit(__E1000_DOWN, &adapter->flags))
5006		e1000_irq_enable(adapter);
5007
5008	/* remove VID from filter table */
5009	index = (vid >> 5) & 0x7F;
5010	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5011	vfta &= ~(1 << (vid & 0x1F));
5012	e1000_write_vfta(hw, index, vfta);
5013
5014	clear_bit(vid, adapter->active_vlans);
5015
5016	if (!e1000_vlan_used(adapter))
5017		e1000_vlan_filter_on_off(adapter, false);
5018
5019	return 0;
5020}
5021
5022static void e1000_restore_vlan(struct e1000_adapter *adapter)
5023{
5024	u16 vid;
5025
5026	if (!e1000_vlan_used(adapter))
5027		return;
5028
5029	e1000_vlan_filter_on_off(adapter, true);
5030	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5031		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5032}
5033
5034int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5035{
5036	struct e1000_hw *hw = &adapter->hw;
5037
5038	hw->autoneg = 0;
5039
5040	/* Make sure dplx is at most 1 bit and lsb of speed is not set
5041	 * for the switch() below to work
5042	 */
5043	if ((spd & 1) || (dplx & ~1))
5044		goto err_inval;
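	/* e.g. SPEED_100 (100) + DUPLEX_FULL (1) = 101, so each supported
	 * speed/duplex pair yields a unique case label below
	 */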
5045
5046	/* Fiber NICs only allow 1000 Mbps full duplex */
5047	if ((hw->media_type == e1000_media_type_fiber) &&
5048	    spd != SPEED_1000 &&
5049	    dplx != DUPLEX_FULL)
5050		goto err_inval;
5051
5052	switch (spd + dplx) {
5053	case SPEED_10 + DUPLEX_HALF:
5054		hw->forced_speed_duplex = e1000_10_half;
5055		break;
5056	case SPEED_10 + DUPLEX_FULL:
5057		hw->forced_speed_duplex = e1000_10_full;
5058		break;
5059	case SPEED_100 + DUPLEX_HALF:
5060		hw->forced_speed_duplex = e1000_100_half;
5061		break;
5062	case SPEED_100 + DUPLEX_FULL:
5063		hw->forced_speed_duplex = e1000_100_full;
5064		break;
5065	case SPEED_1000 + DUPLEX_FULL:
5066		hw->autoneg = 1;
5067		hw->autoneg_advertised = ADVERTISE_1000_FULL;
5068		break;
5069	case SPEED_1000 + DUPLEX_HALF: /* not supported */
5070	default:
5071		goto err_inval;
5072	}
5073
5074	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5075	hw->mdix = AUTO_ALL_MODES;
5076
5077	return 0;
5078
5079err_inval:
5080	e_err(probe, "Unsupported Speed/Duplex configuration\n");
5081	return -EINVAL;
5082}
5083
5084static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5085{
5086	struct net_device *netdev = pci_get_drvdata(pdev);
5087	struct e1000_adapter *adapter = netdev_priv(netdev);
5088	struct e1000_hw *hw = &adapter->hw;
5089	u32 ctrl, ctrl_ext, rctl, status;
5090	u32 wufc = adapter->wol;
5091#ifdef CONFIG_PM
5092	int retval = 0;
5093#endif
5094
5095	netif_device_detach(netdev);
5096
5097	if (netif_running(netdev)) {
5098		int count = E1000_CHECK_RESET_COUNT;
5099
5100		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5101			usleep_range(10000, 20000);
5102
5103		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5104		e1000_down(adapter);
5105	}
5106
5107#ifdef CONFIG_PM
5108	retval = pci_save_state(pdev);
5109	if (retval)
5110		return retval;
5111#endif
5112
5113	status = er32(STATUS);
5114	if (status & E1000_STATUS_LU)
5115		wufc &= ~E1000_WUFC_LNKC;
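	/* link is already up, so don't arm wake-on-link-change; that filter
	 * is only useful when suspending with the link down (inference from
	 * the code, not hardware documentation)
	 */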
5116
5117	if (wufc) {
5118		e1000_setup_rctl(adapter);
5119		e1000_set_rx_mode(netdev);
5120
5121		rctl = er32(RCTL);
5122
5123		/* turn on all-multi mode if wake on multicast is enabled */
5124		if (wufc & E1000_WUFC_MC)
5125			rctl |= E1000_RCTL_MPE;
5126
5127		/* enable receives in the hardware */
5128		ew32(RCTL, rctl | E1000_RCTL_EN);
5129
5130		if (hw->mac_type >= e1000_82540) {
5131			ctrl = er32(CTRL);
5132			/* advertise wake from D3Cold */
5133			#define E1000_CTRL_ADVD3WUC 0x00100000
5134			/* phy power management enable */
5135			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5136			ctrl |= E1000_CTRL_ADVD3WUC |
5137				E1000_CTRL_EN_PHY_PWR_MGMT;
5138			ew32(CTRL, ctrl);
5139		}
5140
5141		if (hw->media_type == e1000_media_type_fiber ||
5142		    hw->media_type == e1000_media_type_internal_serdes) {
5143			/* keep the laser running in D3 */
5144			ctrl_ext = er32(CTRL_EXT);
5145			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5146			ew32(CTRL_EXT, ctrl_ext);
5147		}
5148
5149		ew32(WUC, E1000_WUC_PME_EN);
5150		ew32(WUFC, wufc);
5151	} else {
5152		ew32(WUC, 0);
5153		ew32(WUFC, 0);
5154	}
5155
5156	e1000_release_manageability(adapter);
5157
5158	*enable_wake = !!wufc;
5159
5160	/* make sure adapter isn't asleep if manageability is enabled */
5161	if (adapter->en_mng_pt)
5162		*enable_wake = true;
5163
5164	if (netif_running(netdev))
5165		e1000_free_irq(adapter);
5166
5167	pci_disable_device(pdev);
5168
5169	return 0;
5170}
5171
5172#ifdef CONFIG_PM
5173static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5174{
5175	int retval;
5176	bool wake;
5177
5178	retval = __e1000_shutdown(pdev, &wake);
5179	if (retval)
5180		return retval;
5181
5182	if (wake) {
5183		pci_prepare_to_sleep(pdev);
5184	} else {
5185		pci_wake_from_d3(pdev, false);
5186		pci_set_power_state(pdev, PCI_D3hot);
5187	}
5188
5189	return 0;
5190}
5191
5192static int e1000_resume(struct pci_dev *pdev)
5193{
5194	struct net_device *netdev = pci_get_drvdata(pdev);
5195	struct e1000_adapter *adapter = netdev_priv(netdev);
5196	struct e1000_hw *hw = &adapter->hw;
5197	u32 err;
5198
5199	pci_set_power_state(pdev, PCI_D0);
5200	pci_restore_state(pdev);
5201	pci_save_state(pdev);
5202
5203	if (adapter->need_ioport)
5204		err = pci_enable_device(pdev);
5205	else
5206		err = pci_enable_device_mem(pdev);
5207	if (err) {
5208		pr_err("Cannot enable PCI device from suspend\n");
5209		return err;
5210	}
5211	pci_set_master(pdev);
5212
5213	pci_enable_wake(pdev, PCI_D3hot, 0);
5214	pci_enable_wake(pdev, PCI_D3cold, 0);
5215
5216	if (netif_running(netdev)) {
5217		err = e1000_request_irq(adapter);
5218		if (err)
5219			return err;
5220	}
5221
5222	e1000_power_up_phy(adapter);
5223	e1000_reset(adapter);
5224	ew32(WUS, ~0);
5225
5226	e1000_init_manageability(adapter);
5227
5228	if (netif_running(netdev))
5229		e1000_up(adapter);
5230
5231	netif_device_attach(netdev);
5232
5233	return 0;
5234}
5235#endif
5236
5237static void e1000_shutdown(struct pci_dev *pdev)
5238{
5239	bool wake;
5240
5241	__e1000_shutdown(pdev, &wake);
5242
5243	if (system_state == SYSTEM_POWER_OFF) {
5244		pci_wake_from_d3(pdev, wake);
5245		pci_set_power_state(pdev, PCI_D3hot);
5246	}
5247}
5248
5249#ifdef CONFIG_NET_POLL_CONTROLLER
5250/* Polling 'interrupt' - used by things like netconsole to send skbs
5251 * without having to re-enable interrupts. It's not called while
5252 * the interrupt routine is executing.
5253 */
5254static void e1000_netpoll(struct net_device *netdev)
5255{
5256	struct e1000_adapter *adapter = netdev_priv(netdev);
5257
5258	if (disable_hardirq(adapter->pdev->irq))
5259		e1000_intr(adapter->pdev->irq, netdev);
5260	enable_irq(adapter->pdev->irq);
5261}
5262#endif
5263
5264/**
5265 * e1000_io_error_detected - called when PCI error is detected
5266 * @pdev: Pointer to PCI device
5267 * @state: The current pci connection state
5268 *
5269 * This function is called after a PCI bus error affecting
5270 * this device has been detected.
5271 */
5272static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5273						pci_channel_state_t state)
5274{
5275	struct net_device *netdev = pci_get_drvdata(pdev);
5276	struct e1000_adapter *adapter = netdev_priv(netdev);
5277
5278	netif_device_detach(netdev);
5279
5280	if (state == pci_channel_io_perm_failure)
5281		return PCI_ERS_RESULT_DISCONNECT;
5282
5283	if (netif_running(netdev))
5284		e1000_down(adapter);
5285	pci_disable_device(pdev);
5286
5287	/* Request a slot reset. */
5288	return PCI_ERS_RESULT_NEED_RESET;
5289}
5290
5291/**
5292 * e1000_io_slot_reset - called after the pci bus has been reset.
5293 * @pdev: Pointer to PCI device
5294 *
5295 * Restart the card from scratch, as if from a cold-boot. Implementation
5296 * resembles the first-half of the e1000_resume routine.
5297 */
5298static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5299{
5300	struct net_device *netdev = pci_get_drvdata(pdev);
5301	struct e1000_adapter *adapter = netdev_priv(netdev);
5302	struct e1000_hw *hw = &adapter->hw;
5303	int err;
5304
5305	if (adapter->need_ioport)
5306		err = pci_enable_device(pdev);
5307	else
5308		err = pci_enable_device_mem(pdev);
5309	if (err) {
5310		pr_err("Cannot re-enable PCI device after reset.\n");
5311		return PCI_ERS_RESULT_DISCONNECT;
5312	}
5313	pci_set_master(pdev);
5314
5315	pci_enable_wake(pdev, PCI_D3hot, 0);
5316	pci_enable_wake(pdev, PCI_D3cold, 0);
5317
5318	e1000_reset(adapter);
5319	ew32(WUS, ~0);
5320
5321	return PCI_ERS_RESULT_RECOVERED;
5322}
5323
5324/**
5325 * e1000_io_resume - called when traffic can start flowing again.
5326 * @pdev: Pointer to PCI device
5327 *
5328 * This callback is called when the error recovery driver tells us that
5329 * its OK to resume normal operation. Implementation resembles the
5330 * second-half of the e1000_resume routine.
5331 */
5332static void e1000_io_resume(struct pci_dev *pdev)
5333{
5334	struct net_device *netdev = pci_get_drvdata(pdev);
5335	struct e1000_adapter *adapter = netdev_priv(netdev);
5336
5337	e1000_init_manageability(adapter);
5338
5339	if (netif_running(netdev)) {
5340		if (e1000_up(adapter)) {
5341			pr_info("can't bring device back up after reset\n");
5342			return;
5343		}
5344	}
5345
5346	netif_device_attach(netdev);
5347}
5348
5349/* e1000_main.c */