/*******************************************************************************

  Intel PRO/10GbE Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/prefetch.h>
#include "ixgb.h"

char ixgb_driver_name[] = "ixgb";
static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#define DRIVERNAPI "-NAPI"
#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
const char ixgb_driver_version[] = DRV_VERSION;
static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";

#define IXGB_CB_LENGTH 256
static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
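
/*
 * Usage sketch: the threshold can be set at load time or, since the
 * parameter permissions are 0644, adjusted later through sysfs, e.g.
 *
 *   modprobe ixgb copybreak=128
 *   echo 128 > /sys/module/ixgb/parameters/copybreak
 *
 * Frames at or below the threshold are copied into a small fresh skb on
 * receive so the full-sized receive buffer can be recycled.
 */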

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgb_pci_tbl[] = {
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);

/* Local Function Prototypes */
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);

static int ixgb_clean(struct napi_struct *, int);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);

static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);

static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid);
static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev);
static void ixgb_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers ixgb_err_handler = {
	.error_detected = ixgb_io_error_detected,
	.slot_reset = ixgb_io_slot_reset,
	.resume = ixgb_io_resume,
};

static struct pci_driver ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = ixgb_remove,
	.err_handler = &ixgb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
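
/*
 * Note that netif_msg_init() treats debug as a level, not a bitmask: a
 * value of n enables the n lowest NETIF_MSG_* bits, so for example
 *
 *   modprobe ixgb debug=3
 *
 * selects NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK explicitly,
 * while the default of -1 falls back to DEFAULT_MSG_ENABLE above.
 */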

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
	pr_info("%s\n", ixgb_copyright);

	return pci_register_driver(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
		  IXGB_INT_TXDW | IXGB_INT_LSC;
	if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
		val |= IXGB_INT_GPI0;
	IXGB_WRITE_REG(&adapter->hw, IMS, val);
	IXGB_WRITE_FLUSH(&adapter->hw);
}

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err, irq_flags = IRQF_SHARED;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_rar_set(hw, netdev->dev_addr, 0);
	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

	/* only enable MSI if bus is in PCI-X mode */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
		err = pci_enable_msi(adapter->pdev);
		if (!err) {
			adapter->have_msi = true;
			irq_flags = 0;
		}
		/* proceed to try to request regular interrupt */
	}

	err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
			  netdev->name, netdev);
	if (err) {
		if (adapter->have_msi)
			pci_disable_msi(adapter->pdev);
		netif_err(adapter, probe, adapter->netdev,
			  "Unable to allocate interrupt Error: %d\n", err);
		return err;
	}

	if ((hw->max_frame_size != max_frame) ||
	    (hw->max_frame_size !=
	     (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if (hw->max_frame_size >
		    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if (!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	clear_bit(__IXGB_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);
	ixgb_irq_enable(adapter);

	netif_wake_queue(netdev);

	mod_timer(&adapter->watchdog_timer, jiffies);

	return 0;
}
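
/*
 * Frame-size arithmetic example: at the default MTU of 1500, and with
 * ENET_HEADER_SIZE and ENET_FCS_LENGTH at their usual 14 and 4 bytes,
 * max_frame above works out to 1500 + 14 + 4 = 1518.  That is not above
 * IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH, so MFS is
 * reprogrammed but jumbo-frame mode (CTRL0.JFE) stays off.
 */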

void
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	/* prevent the interrupt handler from restarting watchdog */
	set_bit(__IXGB_DOWN, &adapter->flags);

	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);
	/* waiting for NAPI to complete can re-enable interrupts */
	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);

	if (adapter->have_msi)
		pci_disable_msi(adapter->pdev);

	if (kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;

	ixgb_adapter_stop(hw);
	if (!ixgb_init_hw(hw))
		netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");

	/* restore frame size information */
	IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
	if (hw->max_frame_size >
	    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
		u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
		if (!(ctrl0 & IXGB_CTRL0_JFE)) {
			ctrl0 |= IXGB_CTRL0_JFE;
			IXGB_WRITE_REG(hw, CTRL0, ctrl0);
		}
	}
}

static netdev_features_t
ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
	/*
	 * Tx VLAN insertion does not work per HW design when Rx stripping is
	 * disabled.
	 */
	if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
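
/*
 * Example of the dependency enforced above: clearing Rx VLAN stripping
 * from user space also drops Tx VLAN insertion, e.g.
 *
 *   ethtool -K eth0 rxvlan off    # txvlan is cleared as well
 */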

static int
ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
		return 0;

	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev)) {
		ixgb_down(adapter, true);
		ixgb_up(adapter);
		ixgb_set_speed_duplex(netdev);
	} else
		ixgb_reset(adapter);

	return 0;
}

static const struct net_device_ops ixgb_netdev_ops = {
	.ndo_open		= ixgb_open,
	.ndo_stop		= ixgb_close,
	.ndo_start_xmit		= ixgb_xmit_frame,
	.ndo_get_stats		= ixgb_get_stats,
	.ndo_set_rx_mode	= ixgb_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgb_set_mac,
	.ndo_change_mtu		= ixgb_change_mtu,
	.ndo_tx_timeout		= ixgb_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgb_netpoll,
#endif
	.ndo_fix_features	= ixgb_fix_features,
	.ndo_set_features	= ixgb_set_features,
};

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	int pci_using_dac;
	int i;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA configuration, aborting\n");
			goto err_dma_mask;
		}
	}

	err = pci_request_regions(pdev, ixgb_driver_name);
	if (err)
		goto err_request_regions;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = BAR_1; i <= BAR_5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->netdev_ops = &ixgb_netdev_ops;
	ixgb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	err = ixgb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_HW_CSUM |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX;
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* MTU range: 68 - 16114 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* make sure the EEPROM is good */

	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	netif_info(adapter, probe, adapter->netdev,
		   "Intel(R) PRO/10GbE Network Connection\n");
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_request_regions:
err_dma_mask:
	pci_disable_device(pdev);
	return err;
}
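
/*
 * The MTU bounds advertised above follow from the hardware limits:
 * ETH_MIN_MTU is 68 and, assuming the usual IXGB_MAX_JUMBO_FRAME_SIZE
 * of 16128 (0x3F00), the maximum is 16128 - ETH_HLEN (14) = 16114,
 * matching the "MTU range: 68 - 16114" comment in ixgb_probe().
 */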

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
	pci_disable_device(pdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */

	if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
	    (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	set_bit(__IXGB_DOWN, &adapter->flags);
	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = ixgb_setup_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	netif_carrier_off(netdev);

	/* allocate receive descriptors */

	err = ixgb_setup_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	err = ixgb_up(adapter);
	if (err)
		goto err_up;

	netif_start_queue(netdev);

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	ixgb_down(adapter, true);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					 GFP_KERNEL);
	if (!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
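
/*
 * Ring-sizing sketch (assuming the 16-byte ixgb_tx_desc layout): a
 * 256-entry ring needs 256 * 16 = 4096 bytes and ALIGN() leaves it
 * untouched, while an 80-entry ring (1280 bytes) is rounded up to a
 * full 4096-byte allocation, satisfying the 4K granularity the
 * hardware expects.
 */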

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	u64 tdba = adapter->tx_ring.dma;
	u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	u32 tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE |
		(adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	u32 rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
		rctl |= IXGB_RCTL_BSIZE_2048;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
		rctl |= IXGB_RCTL_BSIZE_4096;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
		rctl |= IXGB_RCTL_BSIZE_8192;
	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
		rctl |= IXGB_RCTL_BSIZE_16384;

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	u64 rdba = adapter->rx_ring.dma;
	u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	u32 rctl;
	u32 rxcsum;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* due to the hardware errata with RXDCTL, we are unable to use any of
	 * the performance enhancing features of it without causing other
	 * subtle bugs, some of the bugs could include receive length
	 * corruption at high data rates (WTHRESH > 0) and/or receive
	 * descriptor ring irregularities (particularly in hardware cache) */
	IXGB_WRITE_REG(hw, RXDCTL, 0);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if (adapter->rx_csum) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
			  adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}

	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* these fields must always be initialized in tx
	 * buffer_info->length = 0;
	 * buffer_info->next_to_watch = 0; */
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(&pdev->dev,
					 buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			buffer_info->length = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u32 rctl;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		/* disable VLAN filtering */
		rctl &= ~IXGB_RCTL_CFIEN;
		rctl &= ~IXGB_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IXGB_RCTL_MPE;
			rctl &= ~IXGB_RCTL_UPE;
		} else {
			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
		}
		/* enable VLAN filtering */
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
	}

	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
				  ETH_ALEN, GFP_ATOMIC);
		u8 *addr;
		if (!mta)
			goto alloc_failed;

		IXGB_WRITE_REG(hw, RCTL, rctl);

		addr = mta;
		netdev_for_each_mc_addr(ha, netdev) {
			memcpy(addr, ha->addr, ETH_ALEN);
			addr += ETH_ALEN;
		}

		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
		kfree(mta);
	}

alloc_failed:
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		ixgb_vlan_strip_enable(adapter);
	else
		ixgb_vlan_strip_disable(adapter);
}

/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to the adapter structure cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if (adapter->hw.link_up) {
		if (!netif_carrier_ok(netdev)) {
			netdev_info(netdev,
				    "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
				    (adapter->hw.fc.type == ixgb_fc_full) ?
				    "RX/TX" :
				    (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
				    "RX" :
				    (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
				    "TX" : "None");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
		}
	}

	ixgb_update_stats(adapter);

	if (!netif_carrier_ok(netdev)) {
		if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
			/* return immediately since reset is imminent */
			return;
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004

static int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	u16 ipcse, tucse, mss;

	if (likely(skb_is_gso(skb))) {
		struct ixgb_buffer *buffer_info;
		struct iphdr *iph;
		int err;

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(iph->check) - (void *)skb->data;
		ipcse = skb_transport_offset(skb) - 1;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						  IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}

	return 0;
}

static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		struct ixgb_buffer *buffer_info;
		css = skb_checksum_start_offset(skb);
		cso = css + skb->csum_offset;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
		buffer_info = &adapter->tx_ring.buffer_info[i];
		WARN_ON(buffer_info->dma != 0);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(u32 *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return true;
	}

	return false;
}

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_buffer *buffer_info;
	int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_DATA_PER_TXD);
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;

		buffer_info->length = size;
		WARN_ON(buffer_info->dma != 0);
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = 0;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_DATA_PER_TXD);

			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && (f == (nr_frags - 1))
				     && size == len && size > 8))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma =
				skb_frag_dma_map(&pdev->dev, frag, offset, size,
						 DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = 0;

			len -= size;
			offset += size;
			count++;
		}
	}
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}

static void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	u32 cmd_type_len = adapter->tx_cmd_type;
	u8 status = 0;
	u8 popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN)
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |=
		cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int ixgb_maybe_stop_tx(struct net_device *netdev,
			      struct ixgb_desc_ring *tx_ring, int size)
{
	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgb_maybe_stop_tx(netdev, size);
}
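
/*
 * The stop/wake handshake depends on the smp_mb() above pairing with
 * the smp_mb() in ixgb_clean_tx_irq(): either the cleanup path observes
 * the stopped queue and wakes it once descriptors are reclaimed, or the
 * re-check of IXGB_DESC_UNUSED() here sees the new room and restarts
 * the queue itself.
 */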

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
	+ 1 /* one more needed for sentinel TSO workaround */
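
/*
 * Worst-case arithmetic (assuming 4 KiB pages, where MAX_SKB_FRAGS is
 * 17): TXD_USE_COUNT() is a ceiling divide by IXGB_MAX_DATA_PER_TXD
 * (16384 bytes), so the linear data contributes 1 descriptor, each
 * page-sized fragment at most 1 more, plus 1 context descriptor and
 * 1 TSO sentinel: 1 + 17 + 1 + 1 = 20 descriptors per worst-case skb.
 */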

static netdev_tx_t
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	int vlan_id = 0;
	int count = 0;
	int tso;

	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
					DESC_NEEDED)))
		return NETDEV_TX_BUSY;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = skb_vlan_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	count = ixgb_tx_map(adapter, skb, first);

	if (count) {
		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
		/* Make sure there is space in the ring for the next send. */
		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		adapter->tx_ring.buffer_info[first].time_stamp = 0;
		adapter->tx_ring.next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct work_struct *work)
{
	struct ixgb_adapter *adapter =
		container_of(work, struct ixgb_adapter, tx_timeout_task);

	adapter->tx_timeout_count++;
	ixgb_down(adapter, true);
	ixgb_up(adapter);
}

/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	return &netdev->stats;
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */

	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgb_up(adapter);

	return 0;
}
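
/*
 * Example: raising the MTU to 9000 gives max_frame = 9000 + 14 + 4 =
 * 9018, so rx_buffer_len becomes 9026 (the extra 8 bytes cover the
 * errata noted in ixgb_sw_init()) and the subsequent ixgb_up()
 * reprograms MFS and turns on jumbo-frame mode.
 */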

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* Prevent stats update while adapter is being reset */
	if (pci_channel_offline(pdev))
		return;

	if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		if (multi >= bcast)
			multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */

	netdev->stats.rx_packets = adapter->stats.gprcl;
	netdev->stats.tx_packets = adapter->stats.gptcl;
	netdev->stats.rx_bytes = adapter->stats.gorcl;
	netdev->stats.tx_bytes = adapter->stats.gotcl;
	netdev->stats.multicast = adapter->stats.mprcl;
	netdev->stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64 bytes) frames
	 * with a length in the type/len field */
	netdev->stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */  +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	/* see above
	 * netdev->stats.rx_length_errors = adapter->stats.rlec;
	 */

	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_fifo_errors = adapter->stats.mpc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;
	netdev->stats.rx_over_errors = adapter->stats.mpc;

	netdev->stats.tx_errors = 0;
	netdev->stats.rx_frame_errors = 0;
	netdev->stats.tx_aborted_errors = 0;
	netdev->stats.tx_carrier_errors = 0;
	netdev->stats.tx_fifo_errors = 0;
	netdev->stats.tx_heartbeat_errors = 0;
	netdev->stats.tx_window_errors = 0;
}
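
/*
 * The hardware keeps 64-bit counters as low/high register pairs, e.g.
 * the multicast count is ((u64)MPRCH << 32) | MPRCL; that is why the
 * promiscuous-mode fix-up above reassembles both halves before
 * subtracting broadcasts from the multicast total.
 */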
1769
1770#define IXGB_MAX_INTR 10
1771/**
1772 * ixgb_intr - Interrupt Handler
1773 * @irq: interrupt number
1774 * @data: pointer to a network interface device structure
1775 **/
1776
1777static irqreturn_t
1778ixgb_intr(int irq, void *data)
1779{
1780	struct net_device *netdev = data;
1781	struct ixgb_adapter *adapter = netdev_priv(netdev);
1782	struct ixgb_hw *hw = &adapter->hw;
1783	u32 icr = IXGB_READ_REG(hw, ICR);
1784
1785	if (unlikely(!icr))
1786		return IRQ_NONE;  /* Not our interrupt */
1787
1788	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1789		if (!test_bit(__IXGB_DOWN, &adapter->flags))
1790			mod_timer(&adapter->watchdog_timer, jiffies);
1791
1792	if (napi_schedule_prep(&adapter->napi)) {
1793
1794		/* Disable interrupts and register for poll. The flush
1795		  of the posted write is intentionally left out.
1796		*/
1797
1798		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1799		__napi_schedule(&adapter->napi);
1800	}
1801	return IRQ_HANDLED;
1802}
1803
1804/**
1805 * ixgb_clean - NAPI Rx polling callback
1806 * @adapter: board private structure
1807 **/
1808
static int
ixgb_clean(struct napi_struct *napi, int budget)
{
	struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
	int work_done = 0;

	ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, budget);

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (!test_bit(__IXGB_DOWN, &adapter->flags))
			ixgb_irq_enable(adapter);
	}

	return work_done;
}
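
/*
 * Usage sketch: this poll routine is registered once at probe time; the
 * call below mirrors how such a handler is hooked up (the weight of 64 is
 * the customary NAPI weight and is an assumption here, not taken from
 * this listing):
 *
 *	netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
 *
 * Returning fewer packets than @budget after napi_complete() tells the
 * NAPI core the device may go back to being interrupt driven.
 */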

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		rmb(); /* read buffer_info after eop_desc */
		for (cleaned = false; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts &
			    (IXGB_TX_DESC_POPTS_TXSM |
			     IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(u32 *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if (++i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(cleaned && netif_carrier_ok(netdev) &&
		     IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean. */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__IXGB_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) &&
		    !(IXGB_READ_REG(&adapter->hw, STATUS) &
		      IXGB_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netif_err(adapter, drv, adapter->netdev,
				  "Detected Tx Unit Hang\n"
				  "  TDH                  <%x>\n"
				  "  TDT                  <%x>\n"
				  "  next_to_use          <%x>\n"
				  "  next_to_clean        <%x>\n"
				  "buffer_info[next_to_clean]\n"
				  "  time_stamp           <%lx>\n"
				  "  next_to_watch        <%x>\n"
				  "  jiffies              <%lx>\n"
				  "  next_to_watch.status <%x>\n",
				  IXGB_READ_REG(&adapter->hw, TDH),
				  IXGB_READ_REG(&adapter->hw, TDT),
				  tx_ring->next_to_use,
				  tx_ring->next_to_clean,
				  tx_ring->buffer_info[eop].time_stamp,
				  eop,
				  jiffies,
				  eop_desc->status);
			netif_stop_queue(netdev);
		}
	}

	return cleaned;
}
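
/*
 * Note on the hang check above: a transmit hang is reported only when all
 * three conditions hold at once -- the descriptor at next_to_watch was
 * time-stamped more than a second (HZ jiffies) ago, hardware still has
 * not written back its DD bit, and the transmitter is not legitimately
 * paused by link flow control (STATUS register, TXOFF bit).  The TXOFF
 * test avoids false positives while XOFF frames from the link partner
 * are throttling the TX unit.
 */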

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/
static void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	    (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb_checksum_none_assert(skb);
		return;
	}

	/* At this point we know the hardware did the TCP checksum;
	 * now look at the TCP checksum error bit */
	if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb_checksum_none_assert(skb);
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}
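
/*
 * For reference: CHECKSUM_UNNECESSARY tells the stack the device already
 * verified the L4 checksum, so software verification is skipped, while
 * CHECKSUM_NONE (asserted by skb_checksum_none_assert()) makes the stack
 * verify the packet itself -- which is how a frame flagged with TCPE is
 * ultimately counted as a checksum error.
 */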

/*
 * this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void ixgb_check_copybreak(struct napi_struct *napi,
				 struct ixgb_buffer *buffer_info,
				 u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = napi_alloc_skb(napi, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
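
/*
 * Worked example (illustrative; the rationale is the usual one for this
 * pattern rather than stated in this file): with NET_IP_ALIGN == 2 the
 * receive buffer was posted at an offset of 2 bytes, so copying from
 * (*skb)->data - NET_IP_ALIGN for length + NET_IP_ALIGN bytes starts the
 * copy on an aligned boundary and reproduces the same 2-byte offset in
 * the new skb, keeping the IP header 4-byte aligned.
 */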

/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output pointer to the number of packets cleaned
 * @work_to_do: NAPI budget; the maximum number of packets to clean
 **/
static bool
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc, *next_rxd;
	struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
	u32 length;
	unsigned int i, j;
	int cleaned_count = 0;
	bool cleaned = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGB_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;

		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */
		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		j = i + 1;
		if (j == rx_ring->count)
			j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 buffer_info->length,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		rx_desc->length = 0;

		if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
			/* All receives must fit into a single buffer */
			pr_debug("Receive packet consumed multiple buffers length<%x>\n",
				 length);
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
			dev_kfree_skb_irq(skb);
			goto rxdesc_done;
		}

		ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		if (status & IXGB_RX_DESC_STATUS_VP)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rx_desc->special));

		netif_receive_skb(skb);

rxdesc_done:
		/* clean up descriptor, might be written over by hw */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
			ixgb_alloc_rx_buffers(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	cleaned_count = IXGB_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		ixgb_alloc_rx_buffers(adapter, cleaned_count);

	return cleaned;
}
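
/*
 * Note on the descriptor protocol above: hardware sets the DD (descriptor
 * done) bit in status once it has finished DMA-ing a frame into the
 * buffer, and the rmb() orders that status read before the rest of the
 * descriptor is trusted.  Replenishment is batched on purpose: cleaned
 * buffers are handed back only every IXGB_RX_BUFFER_WRITE descriptors,
 * because bumping the tail register one buffer at a time is too slow.
 */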

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: maximum number of buffers to allocate
 **/
static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

	/* leave three descriptors unused */
	while (--cleancount > 2 && cleaned_count--) {
		/* recycle! it's good for you */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		rx_desc = IXGB_RX_DESC(*rx_ring, i);
		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee DD bit not set now before h/w gets descriptor
		 * this is the rest of the workaround for h/w double
		 * writeback. */
		rx_desc->status = 0;

		if (++i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs, such
		 * as IA-64). */
		wmb();
		IXGB_WRITE_REG(&adapter->hw, RDT, i);
	}
}
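
/*
 * Tail-pointer arithmetic, for reference: if the loop advanced
 * next_to_use to index i, RDT is written with i - 1 (wrapping to
 * count - 1 when i is 0), i.e. the last descriptor actually filled.
 * Together with the "leave three descriptors unused" rule this keeps
 * hardware from re-using a descriptor before software has cleared its
 * status, the rest of the double write-back workaround noted above.
 */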

static void
ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static void
ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
{
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}

static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
	set_bit(vid, adapter->active_vlans);

	return 0;
}
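
/*
 * Worked example: the VLAN filter table is indexed as an array of 32-bit
 * words, so a VLAN id selects word vid >> 5 and bit vid & 0x1F within it.
 * For vid = 100:
 *
 *	index = (100 >> 5) & 0x7F;	// word 3
 *	bit   = 100 & 0x1F;		// bit 4 of that word
 *
 * ixgb_vlan_rx_kill_vid() below clears the same bit to drop the filter.
 */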

static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);
	u32 vfta, index;

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = netdev_priv(dev);

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgb_io_error_detected - called when PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgb_down(adapter, true);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgb_io_slot_reset - called after the pci bus has been reset
 * @pdev: pointer to pci device with error
 *
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * It is a shortened version of the device probe/discovery code
 * and resembles the first half of the ixgb_probe() routine.
 */
static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Perform card reset only on one instance of the card */
	if (PCI_FUNC(pdev->devfn) != 0)
		return PCI_ERS_RESULT_RECOVERED;

	pci_set_master(pdev);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	ixgb_reset(adapter);

	/* Make sure the EEPROM is good */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, the EEPROM checksum is not valid\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		netif_err(adapter, probe, adapter->netdev,
			  "After reset, invalid MAC address\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgb_io_resume - called when it is OK to resume normal operations
 * @pdev: pointer to pci device with error
 *
 * The error recovery driver tells us that it is OK to resume
 * normal operation. The implementation resembles the second half
 * of the ixgb_probe() routine.
 */
static void ixgb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	pci_set_master(pdev);

	if (netif_running(netdev)) {
		if (ixgb_up(adapter)) {
			pr_err("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
	mod_timer(&adapter->watchdog_timer, jiffies);
}
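
/*
 * Usage sketch (assumed wiring, matching how PCI error recovery callbacks
 * are normally registered; the struct below is illustrative rather than
 * copied from this listing):
 *
 *	static const struct pci_error_handlers ixgb_err_handler = {
 *		.error_detected = ixgb_io_error_detected,
 *		.slot_reset = ixgb_io_slot_reset,
 *		.resume = ixgb_io_resume,
 *	};
 *
 * hung off the .err_handler member of the driver's struct pci_driver.
 * The PCI core then invokes error_detected, slot_reset and resume in
 * that order while recovering a failed device.
 */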

/* ixgb_main.c */