   1/*
   2 * QLogic qlge NIC HBA Driver
   3 * Copyright (c)  2003-2008 QLogic Corporation
   4 * See LICENSE.qlge for copyright and licensing details.
   5 * Author:     Linux qlge network device driver by
   6 *                      Ron Mercer <ron.mercer@qlogic.com>
   7 */
   8#include <linux/kernel.h>
   9#include <linux/bitops.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/pagemap.h>
  16#include <linux/sched.h>
  17#include <linux/slab.h>
  18#include <linux/dmapool.h>
  19#include <linux/mempool.h>
  20#include <linux/spinlock.h>
  21#include <linux/kthread.h>
  22#include <linux/interrupt.h>
  23#include <linux/errno.h>
  24#include <linux/ioport.h>
  25#include <linux/in.h>
  26#include <linux/ip.h>
  27#include <linux/ipv6.h>
  28#include <net/ipv6.h>
  29#include <linux/tcp.h>
  30#include <linux/udp.h>
  31#include <linux/if_arp.h>
  32#include <linux/if_ether.h>
  33#include <linux/netdevice.h>
  34#include <linux/etherdevice.h>
  35#include <linux/ethtool.h>
  36#include <linux/if_vlan.h>
  37#include <linux/skbuff.h>
  38#include <linux/delay.h>
  39#include <linux/mm.h>
  40#include <linux/vmalloc.h>
  41#include <linux/prefetch.h>
  42#include <net/ip6_checksum.h>
  43
  44#include "qlge.h"
  45
  46char qlge_driver_name[] = DRV_NAME;
  47const char qlge_driver_version[] = DRV_VERSION;
  48
  49MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  50MODULE_DESCRIPTION(DRV_STRING " ");
  51MODULE_LICENSE("GPL");
  52MODULE_VERSION(DRV_VERSION);
  53
  54static const u32 default_msg =
  55    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  56/* NETIF_MSG_TIMER |	*/
  57    NETIF_MSG_IFDOWN |
  58    NETIF_MSG_IFUP |
  59    NETIF_MSG_RX_ERR |
  60    NETIF_MSG_TX_ERR |
  61/*  NETIF_MSG_TX_QUEUED | */
  62/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  63/* NETIF_MSG_PKTDATA | */
  64    NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  65
  66static int debug = -1;	/* defaults above */
  67module_param(debug, int, 0664);
  68MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  69
  70#define MSIX_IRQ 0
  71#define MSI_IRQ 1
  72#define LEG_IRQ 2
  73static int qlge_irq_type = MSIX_IRQ;
  74module_param(qlge_irq_type, int, 0664);
  75MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  76
  77static int qlge_mpi_coredump;
  78module_param(qlge_mpi_coredump, int, 0);
   79MODULE_PARM_DESC(qlge_mpi_coredump,
   80		"Option to enable MPI firmware dump. "
   81		"Default is OFF - do not allocate memory.");
  82
  83static int qlge_force_coredump;
  84module_param(qlge_force_coredump, int, 0);
   85MODULE_PARM_DESC(qlge_force_coredump,
   86		"Option to allow forcing a firmware core dump. "
   87		"Default is OFF - do not allow.");
  88
   89static const struct pci_device_id qlge_pci_tbl[] = {
  90	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  91	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  92	/* required last entry */
  93	{0,}
  94};
  95
  96MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  97
  98static int ql_wol(struct ql_adapter *);
  99static void qlge_set_multicast_list(struct net_device *);
 100static int ql_adapter_down(struct ql_adapter *);
 101static int ql_adapter_up(struct ql_adapter *);
 102
  103/* This hardware semaphore provides exclusive access to
  104 * resources shared between the NIC driver, MPI firmware,
  105 * FCoE firmware and the FC driver.
  106 */
 107static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
 108{
 109	u32 sem_bits = 0;
 110
 111	switch (sem_mask) {
 112	case SEM_XGMAC0_MASK:
 113		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
 114		break;
 115	case SEM_XGMAC1_MASK:
 116		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
 117		break;
 118	case SEM_ICB_MASK:
 119		sem_bits = SEM_SET << SEM_ICB_SHIFT;
 120		break;
 121	case SEM_MAC_ADDR_MASK:
 122		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
 123		break;
 124	case SEM_FLASH_MASK:
 125		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
 126		break;
 127	case SEM_PROBE_MASK:
 128		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
 129		break;
 130	case SEM_RT_IDX_MASK:
 131		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
 132		break;
 133	case SEM_PROC_REG_MASK:
 134		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
 135		break;
 136	default:
  137		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
 138		return -EINVAL;
 139	}
 140
 141	ql_write32(qdev, SEM, sem_bits | sem_mask);
 142	return !(ql_read32(qdev, SEM) & sem_bits);
 143}
 144
 145int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
 146{
 147	unsigned int wait_count = 30;
 148	do {
 149		if (!ql_sem_trylock(qdev, sem_mask))
 150			return 0;
 151		udelay(100);
 152	} while (--wait_count);
 153	return -ETIMEDOUT;
 154}
 155
 156void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
 157{
 158	ql_write32(qdev, SEM, sem_mask);
 159	ql_read32(qdev, SEM);	/* flush */
 160}
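
     /* Illustrative usage sketch: callers bracket shared-register access with
      * the hardware semaphore, as ql_set_mac_addr() below does:
      *
      *	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
      *	if (status)
      *		return status;
      *	...program MAC_ADDR_IDX / MAC_ADDR_DATA...
      *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
      *
      * ql_sem_spinlock() retries ql_sem_trylock() up to 30 times with a
      * 100 usec delay, so a contended acquire can busy-wait for roughly 3 ms.
      */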
 161
  162/* This function waits for a specific bit to come ready
  163 * in a given register.  It is used mostly by the initialization
  164 * process, but is also used from process-context paths such as
  165 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
  166 */
 167int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
 168{
 169	u32 temp;
 170	int count = UDELAY_COUNT;
 171
 172	while (count) {
 173		temp = ql_read32(qdev, reg);
 174
 175		/* check for errors */
 176		if (temp & err_bit) {
 177			netif_alert(qdev, probe, qdev->ndev,
  178				    "register 0x%.08x access error, value = 0x%.08x!\n",
 179				    reg, temp);
 180			return -EIO;
 181		} else if (temp & bit)
 182			return 0;
 183		udelay(UDELAY_DELAY);
 184		count--;
 185	}
 186	netif_alert(qdev, probe, qdev->ndev,
 187		    "Timed out waiting for reg %x to come ready.\n", reg);
 188	return -ETIMEDOUT;
 189}
 190
 191/* The CFG register is used to download TX and RX control blocks
 192 * to the chip. This function waits for an operation to complete.
 193 */
 194static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
 195{
 196	int count = UDELAY_COUNT;
 197	u32 temp;
 198
 199	while (count) {
 200		temp = ql_read32(qdev, CFG);
 201		if (temp & CFG_LE)
 202			return -EIO;
 203		if (!(temp & bit))
 204			return 0;
 205		udelay(UDELAY_DELAY);
 206		count--;
 207	}
 208	return -ETIMEDOUT;
 209}
 210
 211
 212/* Used to issue init control blocks to hw. Maps control block,
 213 * sets address, triggers download, waits for completion.
 214 */
 215int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 216		 u16 q_id)
 217{
 218	u64 map;
 219	int status = 0;
 220	int direction;
 221	u32 mask;
 222	u32 value;
 223
 224	direction =
 225	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
 226	    PCI_DMA_FROMDEVICE;
 227
 228	map = pci_map_single(qdev->pdev, ptr, size, direction);
 229	if (pci_dma_mapping_error(qdev->pdev, map)) {
 230		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
 231		return -ENOMEM;
 232	}
 233
  234	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
  235	if (status)
  236		goto lock_failed;
 237
 238	status = ql_wait_cfg(qdev, bit);
 239	if (status) {
 240		netif_err(qdev, ifup, qdev->ndev,
 241			  "Timed out waiting for CFG to come ready.\n");
 242		goto exit;
 243	}
 244
 245	ql_write32(qdev, ICB_L, (u32) map);
 246	ql_write32(qdev, ICB_H, (u32) (map >> 32));
 247
 248	mask = CFG_Q_MASK | (bit << 16);
 249	value = bit | (q_id << CFG_Q_SHIFT);
 250	ql_write32(qdev, CFG, (mask | value));
 251
 252	/*
 253	 * Wait for the bit to clear after signaling hw.
 254	 */
 255	status = ql_wait_cfg(qdev, bit);
 256exit:
 257	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
     lock_failed:
  258	pci_unmap_single(qdev->pdev, map, size, direction);
 259	return status;
 260}
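
     /* Illustrative sketch only: ring bringup code later in this driver
      * pushes an init control block with a call of the form
      *
      *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
      *			      rx_ring->cq_id);
      *
      * where CFG_LCQ requests a completion-queue load and cq_id selects the
      * target queue; TX work queues are loaded the same way with CFG_LRQ.
      */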
 261
 262/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
 263int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
 264			u32 *value)
 265{
 266	u32 offset = 0;
 267	int status;
 268
 269	switch (type) {
 270	case MAC_ADDR_TYPE_MULTI_MAC:
 271	case MAC_ADDR_TYPE_CAM_MAC:
 272		{
 273			status =
 274			    ql_wait_reg_rdy(qdev,
 275				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 276			if (status)
 277				goto exit;
 278			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 279				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 280				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 281			status =
 282			    ql_wait_reg_rdy(qdev,
 283				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 284			if (status)
 285				goto exit;
 286			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 287			status =
 288			    ql_wait_reg_rdy(qdev,
 289				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 290			if (status)
 291				goto exit;
 292			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 293				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 294				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 295			status =
 296			    ql_wait_reg_rdy(qdev,
 297				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
 298			if (status)
 299				goto exit;
 300			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 301			if (type == MAC_ADDR_TYPE_CAM_MAC) {
 302				status =
 303				    ql_wait_reg_rdy(qdev,
 304					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 305				if (status)
 306					goto exit;
 307				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 308					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 309					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
 310				status =
 311				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
 312						    MAC_ADDR_MR, 0);
 313				if (status)
 314					goto exit;
 315				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
 316			}
 317			break;
 318		}
 319	case MAC_ADDR_TYPE_VLAN:
 320	case MAC_ADDR_TYPE_MULTI_FLTR:
 321	default:
 322		netif_crit(qdev, ifup, qdev->ndev,
 323			   "Address type %d not yet supported.\n", type);
 324		status = -EPERM;
 325	}
 326exit:
 327	return status;
 328}
 329
 330/* Set up a MAC, multicast or VLAN address for the
 331 * inbound frame matching.
 332 */
 333static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 334			       u16 index)
 335{
 336	u32 offset = 0;
 337	int status = 0;
 338
 339	switch (type) {
 340	case MAC_ADDR_TYPE_MULTI_MAC:
 341		{
 342			u32 upper = (addr[0] << 8) | addr[1];
 343			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
 344					(addr[4] << 8) | (addr[5]);
 345
 346			status =
 347				ql_wait_reg_rdy(qdev,
 348				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 349			if (status)
 350				goto exit;
 351			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 352				(index << MAC_ADDR_IDX_SHIFT) |
 353				type | MAC_ADDR_E);
 354			ql_write32(qdev, MAC_ADDR_DATA, lower);
 355			status =
 356				ql_wait_reg_rdy(qdev,
 357				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 358			if (status)
 359				goto exit;
 360			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
 361				(index << MAC_ADDR_IDX_SHIFT) |
 362				type | MAC_ADDR_E);
 363
 364			ql_write32(qdev, MAC_ADDR_DATA, upper);
 365			status =
 366				ql_wait_reg_rdy(qdev,
 367				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 368			if (status)
 369				goto exit;
 370			break;
 371		}
 372	case MAC_ADDR_TYPE_CAM_MAC:
 373		{
 374			u32 cam_output;
 375			u32 upper = (addr[0] << 8) | addr[1];
 376			u32 lower =
 377			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
 378			    (addr[5]);
 379			status =
 380			    ql_wait_reg_rdy(qdev,
 381				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 382			if (status)
 383				goto exit;
 384			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 385				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 386				   type);	/* type */
 387			ql_write32(qdev, MAC_ADDR_DATA, lower);
 388			status =
 389			    ql_wait_reg_rdy(qdev,
 390				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 391			if (status)
 392				goto exit;
 393			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
 394				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
 395				   type);	/* type */
 396			ql_write32(qdev, MAC_ADDR_DATA, upper);
 397			status =
 398			    ql_wait_reg_rdy(qdev,
 399				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 400			if (status)
 401				goto exit;
 402			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
 403				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
 404				   type);	/* type */
 405			/* This field should also include the queue id
 406			   and possibly the function id.  Right now we hardcode
 407			   the route field to NIC core.
 408			 */
  409			cam_output = (CAM_OUT_ROUTE_NIC |
  410				      (qdev->func <<
  411				       CAM_OUT_FUNC_SHIFT) |
  412				      (0 << CAM_OUT_CQ_ID_SHIFT));
 413			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
 414				cam_output |= CAM_OUT_RV;
 415			/* route to NIC core */
 416			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 417			break;
 418		}
 419	case MAC_ADDR_TYPE_VLAN:
 420		{
 421			u32 enable_bit = *((u32 *) &addr[0]);
 422			/* For VLAN, the addr actually holds a bit that
 423			 * either enables or disables the vlan id we are
 424			 * addressing. It's either MAC_ADDR_E on or off.
 425			 * That's bit-27 we're talking about.
 426			 */
 427			status =
 428			    ql_wait_reg_rdy(qdev,
 429				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
 430			if (status)
 431				goto exit;
 432			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
 433				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
 434				   type |	/* type */
 435				   enable_bit);	/* enable/disable */
 436			break;
 437		}
 438	case MAC_ADDR_TYPE_MULTI_FLTR:
 439	default:
 440		netif_crit(qdev, ifup, qdev->ndev,
 441			   "Address type %d not yet supported.\n", type);
 442		status = -EPERM;
 443	}
 444exit:
 445	return status;
 446}
 447
 448/* Set or clear MAC address in hardware. We sometimes
 449 * have to clear it to prevent wrong frame routing
 450 * especially in a bonding environment.
 451 */
 452static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
 453{
 454	int status;
 455	char zero_mac_addr[ETH_ALEN];
 456	char *addr;
 457
 458	if (set) {
 459		addr = &qdev->current_mac_addr[0];
 460		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 461			     "Set Mac addr %pM\n", addr);
 462	} else {
 463		memset(zero_mac_addr, 0, ETH_ALEN);
 464		addr = &zero_mac_addr[0];
 465		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
 466			     "Clearing MAC address\n");
 467	}
 468	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 469	if (status)
 470		return status;
 471	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
 472			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
 473	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 474	if (status)
 475		netif_err(qdev, ifup, qdev->ndev,
 476			  "Failed to init mac address.\n");
 477	return status;
 478}
 479
 480void ql_link_on(struct ql_adapter *qdev)
 481{
 482	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
 483	netif_carrier_on(qdev->ndev);
 484	ql_set_mac_addr(qdev, 1);
 485}
 486
 487void ql_link_off(struct ql_adapter *qdev)
 488{
 489	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
 490	netif_carrier_off(qdev->ndev);
 491	ql_set_mac_addr(qdev, 0);
 492}
 493
 494/* Get a specific frame routing value from the CAM.
 495 * Used for debug and reg dump.
 496 */
 497int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
 498{
 499	int status = 0;
 500
 501	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 502	if (status)
 503		goto exit;
 504
 505	ql_write32(qdev, RT_IDX,
 506		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
 507	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
 508	if (status)
 509		goto exit;
 510	*value = ql_read32(qdev, RT_DATA);
 511exit:
 512	return status;
 513}
 514
 515/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 516 * to route different frame types to various inbound queues.  We send broadcast/
 517 * multicast/error frames to the default queue for slow handling,
 518 * and CAM hit/RSS frames to the fast handling queues.
 519 */
 520static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 521			      int enable)
 522{
 523	int status = -EINVAL; /* Return error if no mask match. */
 524	u32 value = 0;
 525
 526	switch (mask) {
 527	case RT_IDX_CAM_HIT:
 528		{
 529			value = RT_IDX_DST_CAM_Q |	/* dest */
 530			    RT_IDX_TYPE_NICQ |	/* type */
 531			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
 532			break;
 533		}
 534	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
 535		{
 536			value = RT_IDX_DST_DFLT_Q |	/* dest */
 537			    RT_IDX_TYPE_NICQ |	/* type */
 538			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
 539			break;
 540		}
 541	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
 542		{
 543			value = RT_IDX_DST_DFLT_Q |	/* dest */
 544			    RT_IDX_TYPE_NICQ |	/* type */
 545			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
 546			break;
 547		}
 548	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
 549		{
 550			value = RT_IDX_DST_DFLT_Q | /* dest */
 551				RT_IDX_TYPE_NICQ | /* type */
 552				(RT_IDX_IP_CSUM_ERR_SLOT <<
 553				RT_IDX_IDX_SHIFT); /* index */
 554			break;
 555		}
 556	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
 557		{
 558			value = RT_IDX_DST_DFLT_Q | /* dest */
 559				RT_IDX_TYPE_NICQ | /* type */
 560				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
 561				RT_IDX_IDX_SHIFT); /* index */
 562			break;
 563		}
 564	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
 565		{
 566			value = RT_IDX_DST_DFLT_Q |	/* dest */
 567			    RT_IDX_TYPE_NICQ |	/* type */
 568			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
 569			break;
 570		}
 571	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
 572		{
 573			value = RT_IDX_DST_DFLT_Q |	/* dest */
 574			    RT_IDX_TYPE_NICQ |	/* type */
 575			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 576			break;
 577		}
 578	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
 579		{
 580			value = RT_IDX_DST_DFLT_Q |	/* dest */
 581			    RT_IDX_TYPE_NICQ |	/* type */
 582			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 583			break;
 584		}
 585	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
 586		{
 587			value = RT_IDX_DST_RSS |	/* dest */
 588			    RT_IDX_TYPE_NICQ |	/* type */
 589			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 590			break;
 591		}
 592	case 0:		/* Clear the E-bit on an entry. */
 593		{
 594			value = RT_IDX_DST_DFLT_Q |	/* dest */
 595			    RT_IDX_TYPE_NICQ |	/* type */
 596			    (index << RT_IDX_IDX_SHIFT);/* index */
 597			break;
 598		}
 599	default:
 600		netif_err(qdev, ifup, qdev->ndev,
 601			  "Mask type %d not yet supported.\n", mask);
 602		status = -EPERM;
 603		goto exit;
 604	}
 605
 606	if (value) {
 607		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
 608		if (status)
 609			goto exit;
 610		value |= (enable ? RT_IDX_E : 0);
 611		ql_write32(qdev, RT_IDX, value);
 612		ql_write32(qdev, RT_DATA, enable ? mask : 0);
 613	}
 614exit:
 615	return status;
 616}
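
     /* Illustrative sketch only: the routing table is programmed one slot at
      * a time, e.g. broadcast frames are steered to the default queue with
      *
      *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
      *
      * while a mask of 0 (the last case above) clears the E-bit on an
      * existing entry.
      */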
 617
 618static void ql_enable_interrupts(struct ql_adapter *qdev)
 619{
 620	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
 621}
 622
 623static void ql_disable_interrupts(struct ql_adapter *qdev)
 624{
 625	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
 626}
 627
 628/* If we're running with multiple MSI-X vectors then we enable on the fly.
 629 * Otherwise, we may have multiple outstanding workers and don't want to
 630 * enable until the last one finishes. In this case, the irq_cnt gets
 631 * incremented every time we queue a worker and decremented every time
 632 * a worker finishes.  Once it hits zero we enable the interrupt.
 633 */
 634u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 635{
 636	u32 var = 0;
 637	unsigned long hw_flags = 0;
 638	struct intr_context *ctx = qdev->intr_context + intr;
 639
 640	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
 641		/* Always enable if we're MSIX multi interrupts and
 642		 * it's not the default (zeroeth) interrupt.
 643		 */
 644		ql_write32(qdev, INTR_EN,
 645			   ctx->intr_en_mask);
 646		var = ql_read32(qdev, STS);
 647		return var;
 648	}
 649
 650	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 651	if (atomic_dec_and_test(&ctx->irq_cnt)) {
 652		ql_write32(qdev, INTR_EN,
 653			   ctx->intr_en_mask);
 654		var = ql_read32(qdev, STS);
 655	}
 656	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 657	return var;
 658}
 659
 660static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 661{
 662	u32 var = 0;
 663	struct intr_context *ctx;
 664
 665	/* HW disables for us if we're MSIX multi interrupts and
 666	 * it's not the default (zeroeth) interrupt.
 667	 */
 668	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
 669		return 0;
 670
 671	ctx = qdev->intr_context + intr;
 672	spin_lock(&qdev->hw_lock);
 673	if (!atomic_read(&ctx->irq_cnt)) {
 674		ql_write32(qdev, INTR_EN,
 675		ctx->intr_dis_mask);
 676		var = ql_read32(qdev, STS);
 677	}
 678	atomic_inc(&ctx->irq_cnt);
 679	spin_unlock(&qdev->hw_lock);
 680	return var;
 681}
 682
 683static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 684{
 685	int i;
 686	for (i = 0; i < qdev->intr_count; i++) {
  687		/* The enable call does an atomic_dec_and_test
 688		 * and enables only if the result is zero.
 689		 * So we precharge it here.
 690		 */
 691		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
 692			i == 0))
 693			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 694		ql_enable_completion_interrupt(qdev, i);
 695	}
 696
 697}
 698
 699static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
 700{
 701	int status, i;
 702	u16 csum = 0;
 703	__le16 *flash = (__le16 *)&qdev->flash;
 704
 705	status = strncmp((char *)&qdev->flash, str, 4);
 706	if (status) {
 707		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
 708		return	status;
 709	}
 710
 711	for (i = 0; i < size; i++)
 712		csum += le16_to_cpu(*flash++);
 713
 714	if (csum)
 715		netif_err(qdev, ifup, qdev->ndev,
 716			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
 717
 718	return csum;
 719}
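
     /* Note on the checksum test above: the flash image is summed as 16-bit
      * little-endian words and is expected to total zero, presumably because
      * the image carries a compensating checksum word; any nonzero sum is
      * treated as corruption.
      */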
 720
 721static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
 722{
 723	int status = 0;
 724	/* wait for reg to come ready */
 725	status = ql_wait_reg_rdy(qdev,
 726			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 727	if (status)
 728		goto exit;
 729	/* set up for reg read */
 730	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
 731	/* wait for reg to come ready */
 732	status = ql_wait_reg_rdy(qdev,
 733			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
 734	if (status)
 735		goto exit;
  736	/* This data is stored on flash as an array of
 737	 * __le32.  Since ql_read32() returns cpu endian
 738	 * we need to swap it back.
 739	 */
 740	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
 741exit:
 742	return status;
 743}
 744
 745static int ql_get_8000_flash_params(struct ql_adapter *qdev)
 746{
 747	u32 i, size;
 748	int status;
 749	__le32 *p = (__le32 *)&qdev->flash;
 750	u32 offset;
 751	u8 mac_addr[6];
 752
 753	/* Get flash offset for function and adjust
 754	 * for dword access.
 755	 */
 756	if (!qdev->port)
 757		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
 758	else
 759		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
 760
 761	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 762		return -ETIMEDOUT;
 763
 764	size = sizeof(struct flash_params_8000) / sizeof(u32);
 765	for (i = 0; i < size; i++, p++) {
 766		status = ql_read_flash_word(qdev, i+offset, p);
 767		if (status) {
 768			netif_err(qdev, ifup, qdev->ndev,
 769				  "Error reading flash.\n");
 770			goto exit;
 771		}
 772	}
 773
 774	status = ql_validate_flash(qdev,
 775			sizeof(struct flash_params_8000) / sizeof(u16),
 776			"8000");
 777	if (status) {
 778		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 779		status = -EINVAL;
 780		goto exit;
 781	}
 782
 783	/* Extract either manufacturer or BOFM modified
 784	 * MAC address.
 785	 */
 786	if (qdev->flash.flash_params_8000.data_type1 == 2)
 787		memcpy(mac_addr,
 788			qdev->flash.flash_params_8000.mac_addr1,
 789			qdev->ndev->addr_len);
 790	else
 791		memcpy(mac_addr,
 792			qdev->flash.flash_params_8000.mac_addr,
 793			qdev->ndev->addr_len);
 794
 795	if (!is_valid_ether_addr(mac_addr)) {
 796		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
 797		status = -EINVAL;
 798		goto exit;
 799	}
 800
 801	memcpy(qdev->ndev->dev_addr,
 802		mac_addr,
 803		qdev->ndev->addr_len);
 804
 805exit:
 806	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 807	return status;
 808}
 809
 810static int ql_get_8012_flash_params(struct ql_adapter *qdev)
 811{
 812	int i;
 813	int status;
 814	__le32 *p = (__le32 *)&qdev->flash;
 815	u32 offset = 0;
 816	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
 817
 818	/* Second function's parameters follow the first
 819	 * function's.
 820	 */
 821	if (qdev->port)
 822		offset = size;
 823
 824	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 825		return -ETIMEDOUT;
 826
 827	for (i = 0; i < size; i++, p++) {
 828		status = ql_read_flash_word(qdev, i+offset, p);
 829		if (status) {
 830			netif_err(qdev, ifup, qdev->ndev,
 831				  "Error reading flash.\n");
 832			goto exit;
 833		}
 834
 835	}
 836
 837	status = ql_validate_flash(qdev,
 838			sizeof(struct flash_params_8012) / sizeof(u16),
 839			"8012");
 840	if (status) {
 841		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
 842		status = -EINVAL;
 843		goto exit;
 844	}
 845
 846	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
 847		status = -EINVAL;
 848		goto exit;
 849	}
 850
 851	memcpy(qdev->ndev->dev_addr,
 852		qdev->flash.flash_params_8012.mac_addr,
 853		qdev->ndev->addr_len);
 854
 855exit:
 856	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 857	return status;
 858}
 859
  860/* xgmac registers are located behind the xgmac_addr and xgmac_data
 861 * register pair.  Each read/write requires us to wait for the ready
 862 * bit before reading/writing the data.
 863 */
 864static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
 865{
 866	int status;
 867	/* wait for reg to come ready */
 868	status = ql_wait_reg_rdy(qdev,
 869			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 870	if (status)
 871		return status;
 872	/* write the data to the data reg */
 873	ql_write32(qdev, XGMAC_DATA, data);
 874	/* trigger the write */
 875	ql_write32(qdev, XGMAC_ADDR, reg);
 876	return status;
 877}
 878
  879/* xgmac registers are located behind the xgmac_addr and xgmac_data
 880 * register pair.  Each read/write requires us to wait for the ready
 881 * bit before reading/writing the data.
 882 */
 883int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
 884{
 885	int status = 0;
 886	/* wait for reg to come ready */
 887	status = ql_wait_reg_rdy(qdev,
 888			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 889	if (status)
 890		goto exit;
 891	/* set up for reg read */
 892	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
 893	/* wait for reg to come ready */
 894	status = ql_wait_reg_rdy(qdev,
 895			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
 896	if (status)
 897		goto exit;
 898	/* get the data */
 899	*data = ql_read32(qdev, XGMAC_DATA);
 900exit:
 901	return status;
 902}
 903
 904/* This is used for reading the 64-bit statistics regs. */
 905int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
 906{
 907	int status = 0;
 908	u32 hi = 0;
 909	u32 lo = 0;
 910
 911	status = ql_read_xgmac_reg(qdev, reg, &lo);
 912	if (status)
 913		goto exit;
 914
 915	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
 916	if (status)
 917		goto exit;
 918
 919	*data = (u64) lo | ((u64) hi << 32);
 920
 921exit:
 922	return status;
 923}
 924
 925static int ql_8000_port_initialize(struct ql_adapter *qdev)
 926{
 927	int status;
 928	/*
 929	 * Get MPI firmware version for driver banner
  930	 * and ethtool info.
 931	 */
 932	status = ql_mb_about_fw(qdev);
 933	if (status)
 934		goto exit;
 935	status = ql_mb_get_fw_state(qdev);
 936	if (status)
 937		goto exit;
 938	/* Wake up a worker to get/set the TX/RX frame sizes. */
 939	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
 940exit:
 941	return status;
 942}
 943
 944/* Take the MAC Core out of reset.
 945 * Enable statistics counting.
 946 * Take the transmitter/receiver out of reset.
 947 * This functionality may be done in the MPI firmware at a
 948 * later date.
 949 */
 950static int ql_8012_port_initialize(struct ql_adapter *qdev)
 951{
 952	int status = 0;
 953	u32 data;
 954
 955	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
 956		/* Another function has the semaphore, so
 957		 * wait for the port init bit to come ready.
 958		 */
 959		netif_info(qdev, link, qdev->ndev,
 960			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
 961		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
 962		if (status) {
 963			netif_crit(qdev, link, qdev->ndev,
 964				   "Port initialize timed out.\n");
 965		}
 966		return status;
 967	}
 968
  969	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
 970	/* Set the core reset. */
 971	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
 972	if (status)
 973		goto end;
 974	data |= GLOBAL_CFG_RESET;
 975	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 976	if (status)
 977		goto end;
 978
 979	/* Clear the core reset and turn on jumbo for receiver. */
 980	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
 981	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
 982	data |= GLOBAL_CFG_TX_STAT_EN;
 983	data |= GLOBAL_CFG_RX_STAT_EN;
 984	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
 985	if (status)
 986		goto end;
 987
  988	/* Enable the transmitter and clear its reset. */
 989	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
 990	if (status)
 991		goto end;
 992	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
 993	data |= TX_CFG_EN;	/* Enable the transmitter. */
 994	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
 995	if (status)
 996		goto end;
 997
  998	/* Enable the receiver and clear its reset. */
 999	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000	if (status)
1001		goto end;
1002	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1003	data |= RX_CFG_EN;	/* Enable the receiver. */
1004	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005	if (status)
1006		goto end;
1007
1008	/* Turn on jumbo. */
1009	status =
1010	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011	if (status)
1012		goto end;
1013	status =
1014	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015	if (status)
1016		goto end;
1017
1018	/* Signal to the world that the port is enabled.        */
1019	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020end:
1021	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022	return status;
1023}
1024
1025static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026{
1027	return PAGE_SIZE << qdev->lbq_buf_order;
1028}
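
     /* Large receive buffers are carved out of a "master" block of
      * PAGE_SIZE << lbq_buf_order bytes; ql_get_next_chunk() below hands out
      * lbq_buf_size sized chunks of that block until it is exhausted.
      */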
1029
1030/* Get the next large buffer. */
1031static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032{
1033	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034	rx_ring->lbq_curr_idx++;
1035	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036		rx_ring->lbq_curr_idx = 0;
1037	rx_ring->lbq_free_cnt++;
1038	return lbq_desc;
1039}
1040
1041static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042		struct rx_ring *rx_ring)
1043{
1044	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046	pci_dma_sync_single_for_cpu(qdev->pdev,
1047					dma_unmap_addr(lbq_desc, mapaddr),
1048				    rx_ring->lbq_buf_size,
1049					PCI_DMA_FROMDEVICE);
1050
1051	/* If it's the last chunk of our master page then
1052	 * we unmap it.
1053	 */
1054	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055					== ql_lbq_block_size(qdev))
1056		pci_unmap_page(qdev->pdev,
1057				lbq_desc->p.pg_chunk.map,
1058				ql_lbq_block_size(qdev),
1059				PCI_DMA_FROMDEVICE);
1060	return lbq_desc;
1061}
1062
1063/* Get the next small buffer. */
1064static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065{
1066	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067	rx_ring->sbq_curr_idx++;
1068	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069		rx_ring->sbq_curr_idx = 0;
1070	rx_ring->sbq_free_cnt++;
1071	return sbq_desc;
1072}
1073
1074/* Update an rx ring index. */
1075static void ql_update_cq(struct rx_ring *rx_ring)
1076{
1077	rx_ring->cnsmr_idx++;
1078	rx_ring->curr_entry++;
1079	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080		rx_ring->cnsmr_idx = 0;
1081		rx_ring->curr_entry = rx_ring->cq_base;
1082	}
1083}
1084
1085static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086{
1087	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088}
1089
1090static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091						struct bq_desc *lbq_desc)
1092{
1093	if (!rx_ring->pg_chunk.page) {
1094		u64 map;
1095		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096						GFP_ATOMIC,
1097						qdev->lbq_buf_order);
1098		if (unlikely(!rx_ring->pg_chunk.page)) {
1099			netif_err(qdev, drv, qdev->ndev,
1100				  "page allocation failed.\n");
1101			return -ENOMEM;
1102		}
1103		rx_ring->pg_chunk.offset = 0;
1104		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105					0, ql_lbq_block_size(qdev),
1106					PCI_DMA_FROMDEVICE);
1107		if (pci_dma_mapping_error(qdev->pdev, map)) {
1108			__free_pages(rx_ring->pg_chunk.page,
1109					qdev->lbq_buf_order);
1110			rx_ring->pg_chunk.page = NULL;
1111			netif_err(qdev, drv, qdev->ndev,
1112				  "PCI mapping failed.\n");
1113			return -ENOMEM;
1114		}
1115		rx_ring->pg_chunk.map = map;
1116		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117	}
1118
1119	/* Copy the current master pg_chunk info
1120	 * to the current descriptor.
1121	 */
1122	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123
1124	/* Adjust the master page chunk for next
1125	 * buffer get.
1126	 */
1127	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129		rx_ring->pg_chunk.page = NULL;
1130		lbq_desc->p.pg_chunk.last_flag = 1;
1131	} else {
1132		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133		get_page(rx_ring->pg_chunk.page);
1134		lbq_desc->p.pg_chunk.last_flag = 0;
1135	}
1136	return 0;
1137}
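
     /* Worked example with illustrative values: with 4 KB pages,
      * lbq_buf_order = 1 and lbq_buf_size = 4096, each 8 KB master block
      * yields two chunks.  Every chunk except the last takes an extra
      * get_page() reference; the last chunk sets last_flag and clears
      * rx_ring->pg_chunk.page so that ql_get_curr_lchunk() unmaps the block
      * once its final chunk has been consumed.
      */
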
1138/* Process (refill) a large buffer queue. */
1139static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140{
1141	u32 clean_idx = rx_ring->lbq_clean_idx;
1142	u32 start_idx = clean_idx;
1143	struct bq_desc *lbq_desc;
1144	u64 map;
1145	int i;
1146
1147	while (rx_ring->lbq_free_cnt > 32) {
1148		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150				     "lbq: try cleaning clean_idx = %d.\n",
1151				     clean_idx);
1152			lbq_desc = &rx_ring->lbq[clean_idx];
1153			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154				rx_ring->lbq_clean_idx = clean_idx;
1155				netif_err(qdev, ifup, qdev->ndev,
 1156						"Could not get a page chunk, i=%d, clean_idx=%d.\n",
1157						i, clean_idx);
1158				return;
1159			}
1160
1161			map = lbq_desc->p.pg_chunk.map +
1162				lbq_desc->p.pg_chunk.offset;
 1163			dma_unmap_addr_set(lbq_desc, mapaddr, map);
 1164			dma_unmap_len_set(lbq_desc, maplen,
 1165					rx_ring->lbq_buf_size);
 1166			*lbq_desc->addr = cpu_to_le64(map);
1167
1168			pci_dma_sync_single_for_device(qdev->pdev, map,
1169						rx_ring->lbq_buf_size,
1170						PCI_DMA_FROMDEVICE);
1171			clean_idx++;
1172			if (clean_idx == rx_ring->lbq_len)
1173				clean_idx = 0;
1174		}
1175
1176		rx_ring->lbq_clean_idx = clean_idx;
1177		rx_ring->lbq_prod_idx += 16;
1178		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179			rx_ring->lbq_prod_idx = 0;
1180		rx_ring->lbq_free_cnt -= 16;
1181	}
1182
1183	if (start_idx != clean_idx) {
1184		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185			     "lbq: updating prod idx = %d.\n",
1186			     rx_ring->lbq_prod_idx);
1187		ql_write_db_reg(rx_ring->lbq_prod_idx,
1188				rx_ring->lbq_prod_idx_db_reg);
1189	}
1190}
1191
1192/* Process (refill) a small buffer queue. */
1193static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194{
1195	u32 clean_idx = rx_ring->sbq_clean_idx;
1196	u32 start_idx = clean_idx;
1197	struct bq_desc *sbq_desc;
1198	u64 map;
1199	int i;
1200
1201	while (rx_ring->sbq_free_cnt > 16) {
1202		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203			sbq_desc = &rx_ring->sbq[clean_idx];
1204			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205				     "sbq: try cleaning clean_idx = %d.\n",
1206				     clean_idx);
1207			if (sbq_desc->p.skb == NULL) {
1208				netif_printk(qdev, rx_status, KERN_DEBUG,
1209					     qdev->ndev,
1210					     "sbq: getting new skb for index %d.\n",
1211					     sbq_desc->index);
1212				sbq_desc->p.skb =
1213				    netdev_alloc_skb(qdev->ndev,
1214						     SMALL_BUFFER_SIZE);
1215				if (sbq_desc->p.skb == NULL) {
1216					rx_ring->sbq_clean_idx = clean_idx;
1217					return;
1218				}
1219				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220				map = pci_map_single(qdev->pdev,
1221						     sbq_desc->p.skb->data,
1222						     rx_ring->sbq_buf_size,
1223						     PCI_DMA_FROMDEVICE);
1224				if (pci_dma_mapping_error(qdev->pdev, map)) {
1225					netif_err(qdev, ifup, qdev->ndev,
1226						  "PCI mapping failed.\n");
1227					rx_ring->sbq_clean_idx = clean_idx;
1228					dev_kfree_skb_any(sbq_desc->p.skb);
1229					sbq_desc->p.skb = NULL;
1230					return;
1231				}
1232				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233				dma_unmap_len_set(sbq_desc, maplen,
1234						  rx_ring->sbq_buf_size);
1235				*sbq_desc->addr = cpu_to_le64(map);
1236			}
1237
1238			clean_idx++;
1239			if (clean_idx == rx_ring->sbq_len)
1240				clean_idx = 0;
1241		}
1242		rx_ring->sbq_clean_idx = clean_idx;
1243		rx_ring->sbq_prod_idx += 16;
1244		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245			rx_ring->sbq_prod_idx = 0;
1246		rx_ring->sbq_free_cnt -= 16;
1247	}
1248
1249	if (start_idx != clean_idx) {
1250		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251			     "sbq: updating prod idx = %d.\n",
1252			     rx_ring->sbq_prod_idx);
1253		ql_write_db_reg(rx_ring->sbq_prod_idx,
1254				rx_ring->sbq_prod_idx_db_reg);
1255	}
1256}
1257
1258static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259				    struct rx_ring *rx_ring)
1260{
1261	ql_update_sbq(qdev, rx_ring);
1262	ql_update_lbq(qdev, rx_ring);
1263}
1264
1265/* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268static void ql_unmap_send(struct ql_adapter *qdev,
1269			  struct tx_ring_desc *tx_ring_desc, int mapped)
1270{
1271	int i;
1272	for (i = 0; i < mapped; i++) {
1273		if (i == 0 || (i == 7 && mapped > 7)) {
1274			/*
1275			 * Unmap the skb->data area, or the
1276			 * external sglist (AKA the Outbound
1277			 * Address List (OAL)).
 1278			 * If it's the zeroeth element, then it's
 1279			 * the skb->data area.  If it's the 7th
 1280			 * element and there are more than 6 frags,
 1281			 * then it's an OAL.
1282			 */
1283			if (i == 7) {
1284				netif_printk(qdev, tx_done, KERN_DEBUG,
1285					     qdev->ndev,
1286					     "unmapping OAL area.\n");
1287			}
1288			pci_unmap_single(qdev->pdev,
1289					 dma_unmap_addr(&tx_ring_desc->map[i],
1290							mapaddr),
1291					 dma_unmap_len(&tx_ring_desc->map[i],
1292						       maplen),
1293					 PCI_DMA_TODEVICE);
1294		} else {
1295			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296				     "unmapping frag %d.\n", i);
1297			pci_unmap_page(qdev->pdev,
1298				       dma_unmap_addr(&tx_ring_desc->map[i],
1299						      mapaddr),
1300				       dma_unmap_len(&tx_ring_desc->map[i],
1301						     maplen), PCI_DMA_TODEVICE);
1302		}
1303	}
1304
1305}
1306
1307/* Map the buffers for this transmit.  This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
1310static int ql_map_send(struct ql_adapter *qdev,
1311		       struct ob_mac_iocb_req *mac_iocb_ptr,
1312		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313{
1314	int len = skb_headlen(skb);
1315	dma_addr_t map;
1316	int frag_idx, err, map_idx = 0;
1317	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318	int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320	if (frag_cnt) {
1321		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322			     "frag_cnt = %d.\n", frag_cnt);
1323	}
1324	/*
1325	 * Map the skb buffer first.
1326	 */
1327	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329	err = pci_dma_mapping_error(qdev->pdev, map);
1330	if (err) {
1331		netif_err(qdev, tx_queued, qdev->ndev,
1332			  "PCI mapping failed with error: %d\n", err);
1333
1334		return NETDEV_TX_BUSY;
1335	}
1336
1337	tbd->len = cpu_to_le32(len);
1338	tbd->addr = cpu_to_le64(map);
1339	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341	map_idx++;
1342
1343	/*
1344	 * This loop fills the remainder of the 8 address descriptors
1345	 * in the IOCB.  If there are more than 7 fragments, then the
1346	 * eighth address desc will point to an external list (OAL).
1347	 * When this happens, the remainder of the frags will be stored
1348	 * in this list.
1349	 */
1350	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352		tbd++;
1353		if (frag_idx == 6 && frag_cnt > 7) {
1354			/* Let's tack on an sglist.
1355			 * Our control block will now
1356			 * look like this:
1357			 * iocb->seg[0] = skb->data
1358			 * iocb->seg[1] = frag[0]
1359			 * iocb->seg[2] = frag[1]
1360			 * iocb->seg[3] = frag[2]
1361			 * iocb->seg[4] = frag[3]
1362			 * iocb->seg[5] = frag[4]
1363			 * iocb->seg[6] = frag[5]
1364			 * iocb->seg[7] = ptr to OAL (external sglist)
1365			 * oal->seg[0] = frag[6]
1366			 * oal->seg[1] = frag[7]
1367			 * oal->seg[2] = frag[8]
1368			 * oal->seg[3] = frag[9]
1369			 * oal->seg[4] = frag[10]
1370			 *      etc...
1371			 */
1372			/* Tack on the OAL in the eighth segment of IOCB. */
1373			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374					     sizeof(struct oal),
1375					     PCI_DMA_TODEVICE);
1376			err = pci_dma_mapping_error(qdev->pdev, map);
1377			if (err) {
1378				netif_err(qdev, tx_queued, qdev->ndev,
1379					  "PCI mapping outbound address list with error: %d\n",
1380					  err);
1381				goto map_error;
1382			}
1383
1384			tbd->addr = cpu_to_le64(map);
1385			/*
1386			 * The length is the number of fragments
1387			 * that remain to be mapped times the length
1388			 * of our sglist (OAL).
1389			 */
1390			tbd->len =
1391			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1392					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394					   map);
1395			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396					  sizeof(struct oal));
1397			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398			map_idx++;
1399		}
1400
1401		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402				       DMA_TO_DEVICE);
1403
1404		err = dma_mapping_error(&qdev->pdev->dev, map);
1405		if (err) {
1406			netif_err(qdev, tx_queued, qdev->ndev,
1407				  "PCI mapping frags failed with error: %d.\n",
1408				  err);
1409			goto map_error;
1410		}
1411
1412		tbd->addr = cpu_to_le64(map);
1413		tbd->len = cpu_to_le32(skb_frag_size(frag));
1414		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416				  skb_frag_size(frag));
1417
1418	}
1419	/* Save the number of segments we've mapped. */
1420	tx_ring_desc->map_cnt = map_idx;
1421	/* Terminate the last segment. */
1422	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423	return NETDEV_TX_OK;
1424
1425map_error:
1426	/*
1427	 * If the first frag mapping failed, then i will be zero.
1428	 * This causes the unmap of the skb->data area.  Otherwise
1429	 * we pass in the number of frags that mapped successfully
 1430	 * so they can be unmapped.
1431	 */
1432	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433	return NETDEV_TX_BUSY;
1434}
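
     /* Worked example of the layout built above for a hypothetical skb with
      * linear data plus 10 frags:
      *
      *	iocb->seg[0]   = skb->data
      *	iocb->seg[1-6] = frags 0-5
      *	iocb->seg[7]   = pointer to the OAL; its length field carries
      *			 TX_DESC_C plus the byte count of the remaining
      *			 tx_buf_desc entries (4 here)
      *	oal->seg[0-3]  = frags 6-9, with TX_DESC_E set on the final entry
      */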
1435
1436/* Categorizing receive firmware frame errors */
1437static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438				 struct rx_ring *rx_ring)
1439{
1440	struct nic_stats *stats = &qdev->nic_stats;
1441
1442	stats->rx_err_count++;
1443	rx_ring->rx_errors++;
1444
1445	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447		stats->rx_code_err++;
1448		break;
1449	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450		stats->rx_oversize_err++;
1451		break;
1452	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453		stats->rx_undersize_err++;
1454		break;
1455	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456		stats->rx_preamble_err++;
1457		break;
1458	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459		stats->rx_frame_len_err++;
1460		break;
1461	case IB_MAC_IOCB_RSP_ERR_CRC:
 1462		stats->rx_crc_err++;
     		break;
1463	default:
1464		break;
1465	}
1466}
1467
1468/**
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1471 */
1472static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1474				  void *page, size_t *len)
1475{
1476	u16 *tags;
1477
1478	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479		return;
1480	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481		tags = (u16 *)page;
1482		/* Look for stacked vlan tags in ethertype field */
1483		if (tags[6] == ETH_P_8021Q &&
1484		    tags[8] == ETH_P_8021Q)
1485			*len += 2 * VLAN_HLEN;
1486		else
1487			*len += VLAN_HLEN;
1488	}
1489}
1490
1491/* Process an inbound completion from an rx ring. */
1492static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493					struct rx_ring *rx_ring,
1494					struct ib_mac_iocb_rsp *ib_mac_rsp,
1495					u32 length,
1496					u16 vlan_id)
1497{
1498	struct sk_buff *skb;
1499	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500	struct napi_struct *napi = &rx_ring->napi;
1501
1502	/* Frame error, so drop the packet. */
1503	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505		put_page(lbq_desc->p.pg_chunk.page);
1506		return;
1507	}
1508	napi->dev = qdev->ndev;
1509
1510	skb = napi_get_frags(napi);
1511	if (!skb) {
1512		netif_err(qdev, drv, qdev->ndev,
1513			  "Couldn't get an skb, exiting.\n");
1514		rx_ring->rx_dropped++;
1515		put_page(lbq_desc->p.pg_chunk.page);
1516		return;
1517	}
1518	prefetch(lbq_desc->p.pg_chunk.va);
1519	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520			     lbq_desc->p.pg_chunk.page,
1521			     lbq_desc->p.pg_chunk.offset,
1522			     length);
1523
1524	skb->len += length;
1525	skb->data_len += length;
1526	skb->truesize += length;
1527	skb_shinfo(skb)->nr_frags++;
1528
1529	rx_ring->rx_packets++;
1530	rx_ring->rx_bytes += length;
1531	skb->ip_summed = CHECKSUM_UNNECESSARY;
1532	skb_record_rx_queue(skb, rx_ring->cq_id);
1533	if (vlan_id != 0xffff)
1534		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535	napi_gro_frags(napi);
1536}
1537
1538/* Process an inbound completion from an rx ring. */
1539static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540					struct rx_ring *rx_ring,
1541					struct ib_mac_iocb_rsp *ib_mac_rsp,
1542					u32 length,
1543					u16 vlan_id)
1544{
1545	struct net_device *ndev = qdev->ndev;
1546	struct sk_buff *skb = NULL;
1547	void *addr;
1548	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549	struct napi_struct *napi = &rx_ring->napi;
1550	size_t hlen = ETH_HLEN;
1551
1552	skb = netdev_alloc_skb(ndev, length);
1553	if (!skb) {
1554		rx_ring->rx_dropped++;
1555		put_page(lbq_desc->p.pg_chunk.page);
1556		return;
1557	}
1558
1559	addr = lbq_desc->p.pg_chunk.va;
1560	prefetch(addr);
1561
1562	/* Frame error, so drop the packet. */
1563	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565		goto err_out;
1566	}
1567
1568	/* Update the MAC header length*/
1569	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
1571	/* The max framesize filter on this chip is set higher than
1572	 * MTU since FCoE uses 2k frames.
1573	 */
1574	if (skb->len > ndev->mtu + hlen) {
1575		netif_err(qdev, drv, qdev->ndev,
1576			  "Segment too small, dropping.\n");
1577		rx_ring->rx_dropped++;
1578		goto err_out;
1579	}
1580	memcpy(skb_put(skb, hlen), addr, hlen);
1581	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583		     length);
1584	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585				lbq_desc->p.pg_chunk.offset + hlen,
1586				length - hlen);
1587	skb->len += length - hlen;
1588	skb->data_len += length - hlen;
1589	skb->truesize += length - hlen;
1590
1591	rx_ring->rx_packets++;
1592	rx_ring->rx_bytes += skb->len;
1593	skb->protocol = eth_type_trans(skb, ndev);
1594	skb_checksum_none_assert(skb);
1595
1596	if ((ndev->features & NETIF_F_RXCSUM) &&
1597		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598		/* TCP frame. */
1599		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601				     "TCP checksum done!\n");
1602			skb->ip_summed = CHECKSUM_UNNECESSARY;
1603		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605			/* Unfragmented ipv4 UDP frame. */
1606			struct iphdr *iph =
1607				(struct iphdr *)((u8 *)addr + hlen);
1608			if (!(iph->frag_off &
1609				htons(IP_MF|IP_OFFSET))) {
1610				skb->ip_summed = CHECKSUM_UNNECESSARY;
1611				netif_printk(qdev, rx_status, KERN_DEBUG,
1612					     qdev->ndev,
1613					     "UDP checksum done!\n");
1614			}
1615		}
1616	}
1617
1618	skb_record_rx_queue(skb, rx_ring->cq_id);
1619	if (vlan_id != 0xffff)
1620		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622		napi_gro_receive(napi, skb);
1623	else
1624		netif_receive_skb(skb);
1625	return;
1626err_out:
1627	dev_kfree_skb_any(skb);
1628	put_page(lbq_desc->p.pg_chunk.page);
1629}
1630
1631/* Process an inbound completion from an rx ring. */
1632static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633					struct rx_ring *rx_ring,
1634					struct ib_mac_iocb_rsp *ib_mac_rsp,
1635					u32 length,
1636					u16 vlan_id)
1637{
1638	struct net_device *ndev = qdev->ndev;
1639	struct sk_buff *skb = NULL;
1640	struct sk_buff *new_skb = NULL;
1641	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643	skb = sbq_desc->p.skb;
1644	/* Allocate new_skb and copy */
1645	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646	if (new_skb == NULL) {
1647		rx_ring->rx_dropped++;
1648		return;
1649	}
1650	skb_reserve(new_skb, NET_IP_ALIGN);
1651	memcpy(skb_put(new_skb, length), skb->data, length);
1652	skb = new_skb;
1653
1654	/* Frame error, so drop the packet. */
1655	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1656		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1657		dev_kfree_skb_any(skb);
1658		return;
1659	}
1660
1661	/* loopback self test for ethtool */
1662	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1663		ql_check_lb_frame(qdev, skb);
1664		dev_kfree_skb_any(skb);
1665		return;
1666	}
1667
1668	/* The max framesize filter on this chip is set higher than
1669	 * MTU since FCoE uses 2k frames.
1670	 */
1671	if (skb->len > ndev->mtu + ETH_HLEN) {
1672		dev_kfree_skb_any(skb);
1673		rx_ring->rx_dropped++;
1674		return;
1675	}
1676
1677	prefetch(skb->data);
1678	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1679		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1680			     "%s Multicast.\n",
1681			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1682			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1683			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1684			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1685			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1686			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1687	}
1688	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1689		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690			     "Promiscuous Packet.\n");
1691
1692	rx_ring->rx_packets++;
1693	rx_ring->rx_bytes += skb->len;
1694	skb->protocol = eth_type_trans(skb, ndev);
1695	skb_checksum_none_assert(skb);
1696
1697	/* If rx checksum is on, and there are no
1698	 * csum or frame errors.
1699	 */
1700	if ((ndev->features & NETIF_F_RXCSUM) &&
1701		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1702		/* TCP frame. */
1703		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1704			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705				     "TCP checksum done!\n");
1706			skb->ip_summed = CHECKSUM_UNNECESSARY;
1707		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1708				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1709			/* Unfragmented ipv4 UDP frame. */
1710			struct iphdr *iph = (struct iphdr *) skb->data;
1711			if (!(iph->frag_off &
1712				htons(IP_MF|IP_OFFSET))) {
1713				skb->ip_summed = CHECKSUM_UNNECESSARY;
1714				netif_printk(qdev, rx_status, KERN_DEBUG,
1715					     qdev->ndev,
1716					     "UDP checksum done!\n");
1717			}
1718		}
1719	}
1720
1721	skb_record_rx_queue(skb, rx_ring->cq_id);
1722	if (vlan_id != 0xffff)
1723		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1724	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1725		napi_gro_receive(&rx_ring->napi, skb);
1726	else
1727		netif_receive_skb(skb);
1728}
1729
1730static void ql_realign_skb(struct sk_buff *skb, int len)
1731{
1732	void *temp_addr = skb->data;
1733
1734	/* Undo the skb_reserve(skb,32) we did before
1735	 * giving to hardware, and realign data on
1736	 * a 2-byte boundary.
1737	 */
1738	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1739	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1740	skb_copy_to_linear_data(skb, temp_addr,
1741		(unsigned int)len);
1742}
1743
1744/*
1745 * This function builds an skb for the given inbound
1746 * completion.  It will be rewritten for readability in the near
 1747 * future, but for now it works well.
1748 */
1749static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1750				       struct rx_ring *rx_ring,
1751				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1752{
1753	struct bq_desc *lbq_desc;
1754	struct bq_desc *sbq_desc;
1755	struct sk_buff *skb = NULL;
1756	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1757	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1758	size_t hlen = ETH_HLEN;
1759
1760	/*
1761	 * Handle the header buffer if present.
1762	 */
1763	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1764	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1765		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766			     "Header of %d bytes in small buffer.\n", hdr_len);
1767		/*
1768		 * Headers fit nicely into a small buffer.
1769		 */
1770		sbq_desc = ql_get_curr_sbuf(rx_ring);
1771		pci_unmap_single(qdev->pdev,
1772				dma_unmap_addr(sbq_desc, mapaddr),
1773				dma_unmap_len(sbq_desc, maplen),
1774				PCI_DMA_FROMDEVICE);
1775		skb = sbq_desc->p.skb;
1776		ql_realign_skb(skb, hdr_len);
1777		skb_put(skb, hdr_len);
1778		sbq_desc->p.skb = NULL;
1779	}
1780
1781	/*
1782	 * Handle the data buffer(s).
1783	 */
1784	if (unlikely(!length)) {	/* Is there data too? */
1785		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786			     "No Data buffer in this packet.\n");
1787		return skb;
1788	}
1789
1790	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1791		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793				     "Headers in small, data of %d bytes in small, combine them.\n",
1794				     length);
1795			/*
1796			 * Data is less than small buffer size so it's
1797			 * stuffed in a small buffer.
1798			 * For this case we append the data
1799			 * from the "data" small buffer to the "header" small
1800			 * buffer.
1801			 */
1802			sbq_desc = ql_get_curr_sbuf(rx_ring);
1803			pci_dma_sync_single_for_cpu(qdev->pdev,
1804						    dma_unmap_addr
1805						    (sbq_desc, mapaddr),
1806						    dma_unmap_len
1807						    (sbq_desc, maplen),
1808						    PCI_DMA_FROMDEVICE);
1809			memcpy(skb_put(skb, length),
1810			       sbq_desc->p.skb->data, length);
1811			pci_dma_sync_single_for_device(qdev->pdev,
1812						       dma_unmap_addr
1813						       (sbq_desc,
1814							mapaddr),
1815						       dma_unmap_len
1816						       (sbq_desc,
1817							maplen),
1818						       PCI_DMA_FROMDEVICE);
1819		} else {
1820			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821				     "%d bytes in a single small buffer.\n",
1822				     length);
1823			sbq_desc = ql_get_curr_sbuf(rx_ring);
1824			skb = sbq_desc->p.skb;
1825			ql_realign_skb(skb, length);
1826			skb_put(skb, length);
1827			pci_unmap_single(qdev->pdev,
1828					 dma_unmap_addr(sbq_desc,
1829							mapaddr),
1830					 dma_unmap_len(sbq_desc,
1831						       maplen),
1832					 PCI_DMA_FROMDEVICE);
1833			sbq_desc->p.skb = NULL;
1834		}
1835	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1836		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1837			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1838				     "Header in small, %d bytes in large. Chain large to small!\n",
1839				     length);
1840			/*
1841			 * The data is in a single large buffer.  We
1842			 * chain it to the header buffer's skb and let
1843			 * it rip.
1844			 */
1845			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1846			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1847			     "Chaining page at offset = %d, for %d bytes to skb.\n",
1848				     lbq_desc->p.pg_chunk.offset, length);
1849			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1850						lbq_desc->p.pg_chunk.offset,
1851						length);
1852			skb->len += length;
1853			skb->data_len += length;
1854			skb->truesize += length;
1855		} else {
1856			/*
1857			 * The headers and data are in a single large buffer. We
1858			 * copy it to a new skb and let it go. This can happen with
1859			 * jumbo mtu on a non-TCP/UDP frame.
1860			 */
1861			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1862			skb = netdev_alloc_skb(qdev->ndev, length);
1863			if (skb == NULL) {
1864				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1865					     "No skb available, drop the packet.\n");
1866				return NULL;
1867			}
1868			pci_unmap_page(qdev->pdev,
1869				       dma_unmap_addr(lbq_desc,
1870						      mapaddr),
1871				       dma_unmap_len(lbq_desc, maplen),
1872				       PCI_DMA_FROMDEVICE);
1873			skb_reserve(skb, NET_IP_ALIGN);
1874			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1875				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1876				     length);
1877			skb_fill_page_desc(skb, 0,
1878						lbq_desc->p.pg_chunk.page,
1879						lbq_desc->p.pg_chunk.offset,
1880						length);
1881			skb->len += length;
1882			skb->data_len += length;
1883			skb->truesize += length;
1884			length -= length;
1885			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1886					      lbq_desc->p.pg_chunk.va,
1887					      &hlen);
1888			__pskb_pull_tail(skb, hlen);
1889		}
1890	} else {
1891		/*
1892		 * The data is in a chain of large buffers
1893		 * pointed to by a small buffer.  We loop
1894		 * through and chain them to our small header
1895		 * buffer's skb.
1896		 * frags:  There are at most 18 frags and our small
1897		 *         buffer will hold 32 of them.  In practice
1898		 *         we use at most 3 for our 9000 byte jumbo
1899		 *         frames, but if the MTU goes up we could
1900		 *         eventually be in trouble.
1901		 */
1902		int size, i = 0;
1903		sbq_desc = ql_get_curr_sbuf(rx_ring);
1904		pci_unmap_single(qdev->pdev,
1905				 dma_unmap_addr(sbq_desc, mapaddr),
1906				 dma_unmap_len(sbq_desc, maplen),
1907				 PCI_DMA_FROMDEVICE);
1908		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1909			/*
1910			 * This is a non-TCP/UDP IP frame, so
1911			 * the headers aren't split into a small
1912			 * buffer.  We have to use the small buffer
1913			 * that contains our sg list as our skb to
1914			 * send upstairs. Copy the sg list here to
1915			 * a local buffer and use it to find the
1916			 * pages to chain.
1917			 */
1918			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919				     "%d bytes of headers & data in chain of large.\n",
1920				     length);
1921			skb = sbq_desc->p.skb;
1922			sbq_desc->p.skb = NULL;
1923			skb_reserve(skb, NET_IP_ALIGN);
1924		}
1925		while (length > 0) {
1926			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1927			size = (length < rx_ring->lbq_buf_size) ? length :
1928				rx_ring->lbq_buf_size;
1929
1930			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931				     "Adding page %d to skb for %d bytes.\n",
1932				     i, size);
1933			skb_fill_page_desc(skb, i,
1934						lbq_desc->p.pg_chunk.page,
1935						lbq_desc->p.pg_chunk.offset,
1936						size);
1937			skb->len += size;
1938			skb->data_len += size;
1939			skb->truesize += size;
1940			length -= size;
1941			i++;
1942		}
1943		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1944				      &hlen);
1945		__pskb_pull_tail(skb, hlen);
1946	}
1947	return skb;
1948}
1949
1950/* Process an inbound completion from an rx ring. */
1951static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1952				   struct rx_ring *rx_ring,
1953				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1954				   u16 vlan_id)
1955{
1956	struct net_device *ndev = qdev->ndev;
1957	struct sk_buff *skb = NULL;
1958
1959	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1960
1961	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1962	if (unlikely(!skb)) {
1963		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964			     "No skb available, drop packet.\n");
1965		rx_ring->rx_dropped++;
1966		return;
1967	}
1968
1969	/* Frame error, so drop the packet. */
1970	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1971		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1972		dev_kfree_skb_any(skb);
1973		return;
1974	}
1975
1976	/* The max framesize filter on this chip is set higher than
1977	 * MTU since FCoE uses 2k frames.
1978	 */
1979	if (skb->len > ndev->mtu + ETH_HLEN) {
1980		dev_kfree_skb_any(skb);
1981		rx_ring->rx_dropped++;
1982		return;
1983	}
1984
1985	/* loopback self test for ethtool */
1986	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1987		ql_check_lb_frame(qdev, skb);
1988		dev_kfree_skb_any(skb);
1989		return;
1990	}
1991
1992	prefetch(skb->data);
1993	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1994		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1995			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1996			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1997			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1998			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1999			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2000			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2001		rx_ring->rx_multicast++;
2002	}
2003	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2004		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2005			     "Promiscuous Packet.\n");
2006	}
2007
2008	skb->protocol = eth_type_trans(skb, ndev);
2009	skb_checksum_none_assert(skb);
2010
2011	/* If rx checksum is on, and there are no
2012	 * csum or frame errors.
2013	 */
2014	if ((ndev->features & NETIF_F_RXCSUM) &&
2015		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2016		/* TCP frame. */
2017		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2018			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019				     "TCP checksum done!\n");
2020			skb->ip_summed = CHECKSUM_UNNECESSARY;
2021		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2022				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2023			/* Unfragmented ipv4 UDP frame. */
2024			struct iphdr *iph = (struct iphdr *) skb->data;
2025			if (!(iph->frag_off &
2026				htons(IP_MF|IP_OFFSET))) {
2027				skb->ip_summed = CHECKSUM_UNNECESSARY;
2028				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029					     "UDP checksum done!\n");
2030			}
2031		}
2032	}
2033
2034	rx_ring->rx_packets++;
2035	rx_ring->rx_bytes += skb->len;
2036	skb_record_rx_queue(skb, rx_ring->cq_id);
2037	if (vlan_id != 0xffff)
2038		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2039	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2040		napi_gro_receive(&rx_ring->napi, skb);
2041	else
2042		netif_receive_skb(skb);
2043}
2044
2045/* Process an inbound completion from an rx ring. */
2046static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047					struct rx_ring *rx_ring,
2048					struct ib_mac_iocb_rsp *ib_mac_rsp)
2049{
2050	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2051	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2052			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2053			((le16_to_cpu(ib_mac_rsp->vlan_id) &
2054			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
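	/* A vlan_id of 0xffff is used below as the "no VLAN tag" sentinel;
	 * it is reported when the frame carries no tag or when VLAN rx
	 * offload is disabled.
	 */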
2055
2056	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2057
2058	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2059		/* The data and headers are split into
2060		 * separate buffers.
2061		 */
2062		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063						vlan_id);
2064	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2065		/* The data fit in a single small buffer.
2066		 * Allocate a new skb, copy the data and
2067		 * return the buffer to the free pool.
2068		 */
2069		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2070						length, vlan_id);
2071	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2072		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2073		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2074		/* TCP packet in a page chunk that's been checksummed.
2075		 * Tack it on to our GRO skb and let it go.
2076		 */
2077		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2078						length, vlan_id);
2079	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2080		/* Non-TCP packet in a page chunk. Allocate an
2081		 * skb, tack it on frags, and send it up.
2082		 */
2083		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2084						length, vlan_id);
2085	} else {
2086		/* Non-TCP/UDP large frames that span multiple buffers
2087		 * can be processed correctly by the split frame logic.
2088		 */
2089		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2090						vlan_id);
2091	}
2092
2093	return (unsigned long)length;
2094}
2095
2096/* Process an outbound completion from an rx ring. */
2097static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2098				   struct ob_mac_iocb_rsp *mac_rsp)
2099{
2100	struct tx_ring *tx_ring;
2101	struct tx_ring_desc *tx_ring_desc;
2102
2103	QL_DUMP_OB_MAC_RSP(mac_rsp);
2104	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2106	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2107	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108	tx_ring->tx_packets++;
2109	dev_kfree_skb(tx_ring_desc->skb);
2110	tx_ring_desc->skb = NULL;
2111
2112	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2113					OB_MAC_IOCB_RSP_S |
2114					OB_MAC_IOCB_RSP_L |
2115					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2116		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2117			netif_warn(qdev, tx_done, qdev->ndev,
2118				   "Total descriptor length did not match transfer length.\n");
2119		}
2120		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2121			netif_warn(qdev, tx_done, qdev->ndev,
2122				   "Frame too short to be valid, not sent.\n");
2123		}
2124		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2125			netif_warn(qdev, tx_done, qdev->ndev,
2126				   "Frame too long, but sent anyway.\n");
2127		}
2128		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2129			netif_warn(qdev, tx_done, qdev->ndev,
2130				   "PCI backplane error. Frame not sent.\n");
2131		}
2132	}
2133	atomic_inc(&tx_ring->tx_count);
2134}
2135
2136/* Fire up a handler to reset the MPI processor. */
2137void ql_queue_fw_error(struct ql_adapter *qdev)
2138{
2139	ql_link_off(qdev);
2140	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2141}
2142
2143void ql_queue_asic_error(struct ql_adapter *qdev)
2144{
2145	ql_link_off(qdev);
2146	ql_disable_interrupts(qdev);
2147	/* Clear adapter up bit to signal the recovery
2148	 * process that it shouldn't kill the reset worker
2149	 * thread
2150	 */
2151	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2152	/* Set the asic recovery bit to indicate to the reset process that
2153	 * we are in fatal error recovery rather than a normal close
2154	 */
2155	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2156	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2157}
2158
2159static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2160				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2161{
2162	switch (ib_ae_rsp->event) {
2163	case MGMT_ERR_EVENT:
2164		netif_err(qdev, rx_err, qdev->ndev,
2165			  "Management Processor Fatal Error.\n");
2166		ql_queue_fw_error(qdev);
2167		return;
2168
2169	case CAM_LOOKUP_ERR_EVENT:
2170		netdev_err(qdev->ndev, "Multiple CAM hits occurred on lookup.\n");
2171		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2172		ql_queue_asic_error(qdev);
2173		return;
2174
2175	case SOFT_ECC_ERROR_EVENT:
2176		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2177		ql_queue_asic_error(qdev);
2178		break;
2179
2180	case PCI_ERR_ANON_BUF_RD:
2181		netdev_err(qdev->ndev, "PCI error occurred when reading "
2182					"anonymous buffers from rx_ring %d.\n",
2183					ib_ae_rsp->q_id);
2184		ql_queue_asic_error(qdev);
2185		break;
2186
2187	default:
2188		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2189			  ib_ae_rsp->event);
2190		ql_queue_asic_error(qdev);
2191		break;
2192	}
2193}
2194
2195static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2196{
2197	struct ql_adapter *qdev = rx_ring->qdev;
2198	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199	struct ob_mac_iocb_rsp *net_rsp = NULL;
2200	int count = 0;
2201
2202	struct tx_ring *tx_ring;
2203	/* While there are entries in the completion queue. */
2204	while (prod != rx_ring->cnsmr_idx) {
2205
2206		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2208			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2209
2210		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
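		/* Make sure the completion entry is not read before the
		 * producer index update has been observed above.
		 */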
2211		rmb();
2212		switch (net_rsp->opcode) {
2213
2214		case OPCODE_OB_MAC_TSO_IOCB:
2215		case OPCODE_OB_MAC_IOCB:
2216			ql_process_mac_tx_intr(qdev, net_rsp);
2217			break;
2218		default:
2219			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2220				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2221				     net_rsp->opcode);
2222		}
2223		count++;
2224		ql_update_cq(rx_ring);
2225		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2226	}
2227	if (!net_rsp)
2228		return 0;
2229	ql_write_cq_idx(rx_ring);
2230	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2231	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2232		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2233			/*
2234			 * The queue got stopped because the tx_ring was full.
2235			 * Wake it up, because it's now at least 25% empty.
2236			 */
2237			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2238	}
2239
2240	return count;
2241}
2242
2243static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2244{
2245	struct ql_adapter *qdev = rx_ring->qdev;
2246	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2247	struct ql_net_rsp_iocb *net_rsp;
2248	int count = 0;
2249
2250	/* While there are entries in the completion queue. */
2251	while (prod != rx_ring->cnsmr_idx) {
2252
2253		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2254			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2255			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2256
2257		net_rsp = rx_ring->curr_entry;
2258		rmb();
2259		switch (net_rsp->opcode) {
2260		case OPCODE_IB_MAC_IOCB:
2261			ql_process_mac_rx_intr(qdev, rx_ring,
2262					       (struct ib_mac_iocb_rsp *)
2263					       net_rsp);
2264			break;
2265
2266		case OPCODE_IB_AE_IOCB:
2267			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2268						net_rsp);
2269			break;
2270		default:
2271			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2272				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2273				     net_rsp->opcode);
2274			break;
2275		}
2276		count++;
2277		ql_update_cq(rx_ring);
2278		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2279		if (count == budget)
2280			break;
2281	}
2282	ql_update_buffer_queues(qdev, rx_ring);
2283	ql_write_cq_idx(rx_ring);
2284	return count;
2285}
2286
2287static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2288{
2289	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2290	struct ql_adapter *qdev = rx_ring->qdev;
2291	struct rx_ring *trx_ring;
2292	int i, work_done = 0;
2293	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2294
2295	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2296		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2297
2298	/* Service the TX rings first.  They start
2299	 * right after the RSS rings. */
2300	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2301		trx_ring = &qdev->rx_ring[i];
2302		/* If this TX completion ring belongs to this vector and
2303		 * it's not empty then service it.
2304		 */
2305		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2306			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2307					trx_ring->cnsmr_idx)) {
2308			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309				     "%s: Servicing TX completion ring %d.\n",
2310				     __func__, trx_ring->cq_id);
2311			ql_clean_outbound_rx_ring(trx_ring);
2312		}
2313	}
2314
2315	/*
2316	 * Now service the RSS ring if it's active.
2317	 */
2318	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2319					rx_ring->cnsmr_idx) {
2320		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2321			     "%s: Servicing RX completion ring %d.\n",
2322			     __func__, rx_ring->cq_id);
2323		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2324	}
2325
2326	if (work_done < budget) {
2327		napi_complete(napi);
2328		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2329	}
2330	return work_done;
2331}
2332
2333static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2334{
2335	struct ql_adapter *qdev = netdev_priv(ndev);
2336
2337	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2338		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2339				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2340	} else {
2341		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2342	}
2343}
2344
2345/**
2346 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2347 * based on the features to enable/disable hardware vlan accel
2348 */
2349static int qlge_update_hw_vlan_features(struct net_device *ndev,
2350					netdev_features_t features)
2351{
2352	struct ql_adapter *qdev = netdev_priv(ndev);
2353	int status = 0;
2354
2355	status = ql_adapter_down(qdev);
2356	if (status) {
2357		netif_err(qdev, link, qdev->ndev,
2358			  "Failed to bring down the adapter\n");
2359		return status;
2360	}
2361
2362	/* update the features with the recent change */
2363	ndev->features = features;
2364
2365	status = ql_adapter_up(qdev);
2366	if (status) {
2367		netif_err(qdev, link, qdev->ndev,
2368			  "Failed to bring up the adapter\n");
2369		return status;
2370	}
2371	return status;
2372}
2373
2374static netdev_features_t qlge_fix_features(struct net_device *ndev,
2375	netdev_features_t features)
2376{
2377	int err;
2378
2379	/* Update the behavior of vlan accel in the adapter */
2380	err = qlge_update_hw_vlan_features(ndev, features);
2381	if (err)
2382		return err;
2383
2384	return features;
2385}
2386
2387static int qlge_set_features(struct net_device *ndev,
2388	netdev_features_t features)
2389{
2390	netdev_features_t changed = ndev->features ^ features;
2391
2392	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2393		qlge_vlan_mode(ndev, features);
2394
2395	return 0;
2396}
2397
2398static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2399{
2400	u32 enable_bit = MAC_ADDR_E;
2401	int err;
2402
2403	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2404				  MAC_ADDR_TYPE_VLAN, vid);
2405	if (err)
2406		netif_err(qdev, ifup, qdev->ndev,
2407			  "Failed to init vlan address.\n");
2408	return err;
2409}
2410
2411static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2412{
2413	struct ql_adapter *qdev = netdev_priv(ndev);
2414	int status;
2415	int err;
2416
2417	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2418	if (status)
2419		return status;
2420
2421	err = __qlge_vlan_rx_add_vid(qdev, vid);
2422	set_bit(vid, qdev->active_vlans);
2423
2424	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2425
2426	return err;
2427}
2428
2429static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2430{
2431	u32 enable_bit = 0;
2432	int err;
2433
2434	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2435				  MAC_ADDR_TYPE_VLAN, vid);
2436	if (err)
2437		netif_err(qdev, ifup, qdev->ndev,
2438			  "Failed to clear vlan address.\n");
2439	return err;
2440}
2441
2442static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2443{
2444	struct ql_adapter *qdev = netdev_priv(ndev);
2445	int status;
2446	int err;
2447
2448	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2449	if (status)
2450		return status;
2451
2452	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2453	clear_bit(vid, qdev->active_vlans);
2454
2455	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2456
2457	return err;
2458}
2459
2460static void qlge_restore_vlan(struct ql_adapter *qdev)
2461{
2462	int status;
2463	u16 vid;
2464
2465	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2466	if (status)
2467		return;
2468
2469	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2470		__qlge_vlan_rx_add_vid(qdev, vid);
2471
2472	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2473}
2474
2475/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2476static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2477{
2478	struct rx_ring *rx_ring = dev_id;
2479	napi_schedule(&rx_ring->napi);
2480	return IRQ_HANDLED;
2481}
2482
2483/* This handles a fatal error, MPI activity, and the default
2484 * rx_ring in an MSI-X multiple vector environment.
2485 * In an MSI/Legacy environment it also processes the rest of
2486 * the rx_rings.
2487 */
2488static irqreturn_t qlge_isr(int irq, void *dev_id)
2489{
2490	struct rx_ring *rx_ring = dev_id;
2491	struct ql_adapter *qdev = rx_ring->qdev;
2492	struct intr_context *intr_context = &qdev->intr_context[0];
2493	u32 var;
2494	int work_done = 0;
2495
2496	spin_lock(&qdev->hw_lock);
2497	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2498		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2499			     "Shared Interrupt, Not ours!\n");
2500		spin_unlock(&qdev->hw_lock);
2501		return IRQ_NONE;
2502	}
2503	spin_unlock(&qdev->hw_lock);
2504
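	/* The value returned by ql_disable_completion_interrupt() carries
	 * the chip status bits that are checked below for fatal errors
	 * and MPI activity.
	 */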
2505	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2506
2507	/*
2508	 * Check for fatal error.
2509	 */
2510	if (var & STS_FE) {
2511		ql_queue_asic_error(qdev);
2512		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2513		var = ql_read32(qdev, ERR_STS);
2514		netdev_err(qdev->ndev, "Resetting chip. "
2515					"Error Status Register = 0x%x\n", var);
2516		return IRQ_HANDLED;
2517	}
2518
2519	/*
2520	 * Check MPI processor activity.
2521	 */
2522	if ((var & STS_PI) &&
2523		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2524		/*
2525		 * We've got an async event or mailbox completion.
2526		 * Handle it and clear the source of the interrupt.
2527		 */
2528		netif_err(qdev, intr, qdev->ndev,
2529			  "Got MPI processor interrupt.\n");
2530		ql_disable_completion_interrupt(qdev, intr_context->intr);
2531		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2532		queue_delayed_work_on(smp_processor_id(),
2533				qdev->workqueue, &qdev->mpi_work, 0);
2534		work_done++;
2535	}
2536
2537	/*
2538	 * Get the bit-mask that shows the active queues for this
2539	 * pass.  Compare it to the queues that this irq services
2540	 * and call napi if there's a match.
2541	 */
2542	var = ql_read32(qdev, ISR1);
2543	if (var & intr_context->irq_mask) {
2544		netif_info(qdev, intr, qdev->ndev,
2545			   "Waking handler for rx_ring[0].\n");
2546		ql_disable_completion_interrupt(qdev, intr_context->intr);
2547		napi_schedule(&rx_ring->napi);
2548		work_done++;
2549	}
2550	ql_enable_completion_interrupt(qdev, intr_context->intr);
2551	return work_done ? IRQ_HANDLED : IRQ_NONE;
2552}
2553
2554static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2555{
2556
2557	if (skb_is_gso(skb)) {
2558		int err;
2559
2560		err = skb_cow_head(skb, 0);
2561		if (err < 0)
2562			return err;
2563
2564		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2565		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2566		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2567		mac_iocb_ptr->total_hdrs_len =
2568		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2569		mac_iocb_ptr->net_trans_offset =
2570		    cpu_to_le16(skb_network_offset(skb) |
2571				skb_transport_offset(skb)
2572				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2573		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2574		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
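		/* Seed the TCP checksum with the pseudo-header checksum so
		 * the hardware only has to fold in the segment payload.
		 */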
2575		if (likely(skb->protocol == htons(ETH_P_IP))) {
2576			struct iphdr *iph = ip_hdr(skb);
2577			iph->check = 0;
2578			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2579			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2580								 iph->daddr, 0,
2581								 IPPROTO_TCP,
2582								 0);
2583		} else if (skb->protocol == htons(ETH_P_IPV6)) {
2584			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2585			tcp_hdr(skb)->check =
2586			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2587					     &ipv6_hdr(skb)->daddr,
2588					     0, IPPROTO_TCP, 0);
2589		}
2590		return 1;
2591	}
2592	return 0;
2593}
2594
2595static void ql_hw_csum_setup(struct sk_buff *skb,
2596			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2597{
2598	int len;
2599	struct iphdr *iph = ip_hdr(skb);
2600	__sum16 *check;
2601	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2602	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2603	mac_iocb_ptr->net_trans_offset =
2604		cpu_to_le16(skb_network_offset(skb) |
2605		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2606
2607	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2608	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2609	if (likely(iph->protocol == IPPROTO_TCP)) {
2610		check = &(tcp_hdr(skb)->check);
2611		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2612		mac_iocb_ptr->total_hdrs_len =
2613		    cpu_to_le16(skb_transport_offset(skb) +
2614				(tcp_hdr(skb)->doff << 2));
2615	} else {
2616		check = &(udp_hdr(skb)->check);
2617		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2618		mac_iocb_ptr->total_hdrs_len =
2619		    cpu_to_le16(skb_transport_offset(skb) +
2620				sizeof(struct udphdr));
2621	}
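	/* Seed the checksum field with the pseudo-header checksum; the
	 * TC/UC flag set above selects TCP vs. UDP checksum offload so
	 * the hardware can insert the final checksum.
	 */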
2622	*check = ~csum_tcpudp_magic(iph->saddr,
2623				    iph->daddr, len, iph->protocol, 0);
2624}
2625
2626static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2627{
2628	struct tx_ring_desc *tx_ring_desc;
2629	struct ob_mac_iocb_req *mac_iocb_ptr;
2630	struct ql_adapter *qdev = netdev_priv(ndev);
2631	int tso;
2632	struct tx_ring *tx_ring;
2633	u32 tx_ring_idx = (u32) skb->queue_mapping;
2634
2635	tx_ring = &qdev->tx_ring[tx_ring_idx];
2636
2637	if (skb_padto(skb, ETH_ZLEN))
2638		return NETDEV_TX_OK;
2639
2640	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2641		netif_info(qdev, tx_queued, qdev->ndev,
2642			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2643			   __func__, tx_ring_idx);
2644		netif_stop_subqueue(ndev, tx_ring->wq_id);
2645		tx_ring->tx_errors++;
2646		return NETDEV_TX_BUSY;
2647	}
2648	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2649	mac_iocb_ptr = tx_ring_desc->queue_entry;
2650	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2651
2652	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2653	mac_iocb_ptr->tid = tx_ring_desc->index;
2654	/* We use the upper 32-bits to store the tx queue for this IO.
2655	 * When we get the completion we can use it to establish the context.
2656	 */
2657	mac_iocb_ptr->txq_idx = tx_ring_idx;
2658	tx_ring_desc->skb = skb;
2659
2660	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2661
2662	if (vlan_tx_tag_present(skb)) {
2663		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2664			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2665		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2666		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2667	}
2668	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2669	if (tso < 0) {
2670		dev_kfree_skb_any(skb);
2671		return NETDEV_TX_OK;
2672	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2673		ql_hw_csum_setup(skb,
2674				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2675	}
2676	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2677			NETDEV_TX_OK) {
2678		netif_err(qdev, tx_queued, qdev->ndev,
2679			  "Could not map the segments.\n");
2680		tx_ring->tx_errors++;
2681		return NETDEV_TX_BUSY;
2682	}
2683	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2684	tx_ring->prod_idx++;
2685	if (tx_ring->prod_idx == tx_ring->wq_len)
2686		tx_ring->prod_idx = 0;
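	/* Ensure the IOCB contents are written to memory before the
	 * doorbell write below makes the new producer index visible to
	 * the chip.
	 */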
2687	wmb();
2688
2689	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2690	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2691		     "tx queued, slot %d, len %d\n",
2692		     tx_ring->prod_idx, skb->len);
2693
2694	atomic_dec(&tx_ring->tx_count);
2695
2696	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2697		netif_stop_subqueue(ndev, tx_ring->wq_id);
2698		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2699			/*
2700			 * The queue got stopped because the tx_ring was full.
2701			 * Wake it up, because it's now at least 25% empty.
2702			 */
2703			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2704	}
2705	return NETDEV_TX_OK;
2706}
2707
2708
2709static void ql_free_shadow_space(struct ql_adapter *qdev)
2710{
2711	if (qdev->rx_ring_shadow_reg_area) {
2712		pci_free_consistent(qdev->pdev,
2713				    PAGE_SIZE,
2714				    qdev->rx_ring_shadow_reg_area,
2715				    qdev->rx_ring_shadow_reg_dma);
2716		qdev->rx_ring_shadow_reg_area = NULL;
2717	}
2718	if (qdev->tx_ring_shadow_reg_area) {
2719		pci_free_consistent(qdev->pdev,
2720				    PAGE_SIZE,
2721				    qdev->tx_ring_shadow_reg_area,
2722				    qdev->tx_ring_shadow_reg_dma);
2723		qdev->tx_ring_shadow_reg_area = NULL;
2724	}
2725}
2726
2727static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2728{
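	/* One DMA-coherent page is carved up into per-ring rx shadow
	 * areas (producer index plus buffer queue indirect lists), and a
	 * second page into per-ring tx consumer index shadows.
	 */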
2729	qdev->rx_ring_shadow_reg_area =
2730	    pci_alloc_consistent(qdev->pdev,
2731				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2732	if (qdev->rx_ring_shadow_reg_area == NULL) {
2733		netif_err(qdev, ifup, qdev->ndev,
2734			  "Allocation of RX shadow space failed.\n");
2735		return -ENOMEM;
2736	}
2737	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2738	qdev->tx_ring_shadow_reg_area =
2739	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2740				 &qdev->tx_ring_shadow_reg_dma);
2741	if (qdev->tx_ring_shadow_reg_area == NULL) {
2742		netif_err(qdev, ifup, qdev->ndev,
2743			  "Allocation of TX shadow space failed.\n");
2744		goto err_wqp_sh_area;
2745	}
2746	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2747	return 0;
2748
2749err_wqp_sh_area:
2750	pci_free_consistent(qdev->pdev,
2751			    PAGE_SIZE,
2752			    qdev->rx_ring_shadow_reg_area,
2753			    qdev->rx_ring_shadow_reg_dma);
2754	return -ENOMEM;
2755}
2756
2757static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2758{
2759	struct tx_ring_desc *tx_ring_desc;
2760	int i;
2761	struct ob_mac_iocb_req *mac_iocb_ptr;
2762
2763	mac_iocb_ptr = tx_ring->wq_base;
2764	tx_ring_desc = tx_ring->q;
2765	for (i = 0; i < tx_ring->wq_len; i++) {
2766		tx_ring_desc->index = i;
2767		tx_ring_desc->skb = NULL;
2768		tx_ring_desc->queue_entry = mac_iocb_ptr;
2769		mac_iocb_ptr++;
2770		tx_ring_desc++;
2771	}
2772	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2773}
2774
2775static void ql_free_tx_resources(struct ql_adapter *qdev,
2776				 struct tx_ring *tx_ring)
2777{
2778	if (tx_ring->wq_base) {
2779		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2780				    tx_ring->wq_base, tx_ring->wq_base_dma);
2781		tx_ring->wq_base = NULL;
2782	}
2783	kfree(tx_ring->q);
2784	tx_ring->q = NULL;
2785}
2786
2787static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2788				 struct tx_ring *tx_ring)
2789{
2790	tx_ring->wq_base =
2791	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2792				 &tx_ring->wq_base_dma);
2793
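	/* The chip requires the work queue base to be aligned per
	 * WQ_ADDR_ALIGN, so a misaligned DMA address is treated like an
	 * allocation failure.
	 */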
2794	if ((tx_ring->wq_base == NULL) ||
2795	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2796		goto pci_alloc_err;
2797
2798	tx_ring->q =
2799	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2800	if (tx_ring->q == NULL)
2801		goto err;
2802
2803	return 0;
2804err:
2805	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2806			    tx_ring->wq_base, tx_ring->wq_base_dma);
2807	tx_ring->wq_base = NULL;
2808pci_alloc_err:
2809	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2810	return -ENOMEM;
2811}
2812
2813static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2814{
2815	struct bq_desc *lbq_desc;
2816
2817	uint32_t  curr_idx, clean_idx;
2818
2819	curr_idx = rx_ring->lbq_curr_idx;
2820	clean_idx = rx_ring->lbq_clean_idx;
2821	while (curr_idx != clean_idx) {
2822		lbq_desc = &rx_ring->lbq[curr_idx];
2823
2824		if (lbq_desc->p.pg_chunk.last_flag) {
2825			pci_unmap_page(qdev->pdev,
2826				lbq_desc->p.pg_chunk.map,
2827				ql_lbq_block_size(qdev),
2828				       PCI_DMA_FROMDEVICE);
2829			lbq_desc->p.pg_chunk.last_flag = 0;
2830		}
2831
2832		put_page(lbq_desc->p.pg_chunk.page);
2833		lbq_desc->p.pg_chunk.page = NULL;
2834
2835		if (++curr_idx == rx_ring->lbq_len)
2836			curr_idx = 0;
2837
2838	}
2839	if (rx_ring->pg_chunk.page) {
2840		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2841			ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2842		put_page(rx_ring->pg_chunk.page);
2843		rx_ring->pg_chunk.page = NULL;
2844	}
2845}
2846
2847static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2848{
2849	int i;
2850	struct bq_desc *sbq_desc;
2851
2852	for (i = 0; i < rx_ring->sbq_len; i++) {
2853		sbq_desc = &rx_ring->sbq[i];
2854		if (sbq_desc == NULL) {
2855			netif_err(qdev, ifup, qdev->ndev,
2856				  "sbq_desc %d is NULL.\n", i);
2857			return;
2858		}
2859		if (sbq_desc->p.skb) {
2860			pci_unmap_single(qdev->pdev,
2861					 dma_unmap_addr(sbq_desc, mapaddr),
2862					 dma_unmap_len(sbq_desc, maplen),
2863					 PCI_DMA_FROMDEVICE);
2864			dev_kfree_skb(sbq_desc->p.skb);
2865			sbq_desc->p.skb = NULL;
2866		}
2867	}
2868}
2869
2870/* Free all large and small rx buffers associated
2871 * with the completion queues for this device.
2872 */
2873static void ql_free_rx_buffers(struct ql_adapter *qdev)
2874{
2875	int i;
2876	struct rx_ring *rx_ring;
2877
2878	for (i = 0; i < qdev->rx_ring_count; i++) {
2879		rx_ring = &qdev->rx_ring[i];
2880		if (rx_ring->lbq)
2881			ql_free_lbq_buffers(qdev, rx_ring);
2882		if (rx_ring->sbq)
2883			ql_free_sbq_buffers(qdev, rx_ring);
2884	}
2885}
2886
2887static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2888{
2889	struct rx_ring *rx_ring;
2890	int i;
2891
2892	for (i = 0; i < qdev->rx_ring_count; i++) {
2893		rx_ring = &qdev->rx_ring[i];
2894		if (rx_ring->type != TX_Q)
2895			ql_update_buffer_queues(qdev, rx_ring);
2896	}
2897}
2898
2899static void ql_init_lbq_ring(struct ql_adapter *qdev,
2900				struct rx_ring *rx_ring)
2901{
2902	int i;
2903	struct bq_desc *lbq_desc;
2904	__le64 *bq = rx_ring->lbq_base;
2905
2906	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2907	for (i = 0; i < rx_ring->lbq_len; i++) {
2908		lbq_desc = &rx_ring->lbq[i];
2909		memset(lbq_desc, 0, sizeof(*lbq_desc));
2910		lbq_desc->index = i;
2911		lbq_desc->addr = bq;
2912		bq++;
2913	}
2914}
2915
2916static void ql_init_sbq_ring(struct ql_adapter *qdev,
2917				struct rx_ring *rx_ring)
2918{
2919	int i;
2920	struct bq_desc *sbq_desc;
2921	__le64 *bq = rx_ring->sbq_base;
2922
2923	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2924	for (i = 0; i < rx_ring->sbq_len; i++) {
2925		sbq_desc = &rx_ring->sbq[i];
2926		memset(sbq_desc, 0, sizeof(*sbq_desc));
2927		sbq_desc->index = i;
2928		sbq_desc->addr = bq;
2929		bq++;
2930	}
2931}
2932
2933static void ql_free_rx_resources(struct ql_adapter *qdev,
2934				 struct rx_ring *rx_ring)
2935{
2936	/* Free the small buffer queue. */
2937	if (rx_ring->sbq_base) {
2938		pci_free_consistent(qdev->pdev,
2939				    rx_ring->sbq_size,
2940				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2941		rx_ring->sbq_base = NULL;
2942	}
2943
2944	/* Free the small buffer queue control blocks. */
2945	kfree(rx_ring->sbq);
2946	rx_ring->sbq = NULL;
2947
2948	/* Free the large buffer queue. */
2949	if (rx_ring->lbq_base) {
2950		pci_free_consistent(qdev->pdev,
2951				    rx_ring->lbq_size,
2952				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2953		rx_ring->lbq_base = NULL;
2954	}
2955
2956	/* Free the large buffer queue control blocks. */
2957	kfree(rx_ring->lbq);
2958	rx_ring->lbq = NULL;
2959
2960	/* Free the rx queue. */
2961	if (rx_ring->cq_base) {
2962		pci_free_consistent(qdev->pdev,
2963				    rx_ring->cq_size,
2964				    rx_ring->cq_base, rx_ring->cq_base_dma);
2965		rx_ring->cq_base = NULL;
2966	}
2967}
2968
2969/* Allocate queues and buffers for this completion queue based
2970 * on the values in the parameter structure. */
2971static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2972				 struct rx_ring *rx_ring)
2973{
2974
2975	/*
2976	 * Allocate the completion queue for this rx_ring.
2977	 */
2978	rx_ring->cq_base =
2979	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2980				 &rx_ring->cq_base_dma);
2981
2982	if (rx_ring->cq_base == NULL) {
2983		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2984		return -ENOMEM;
2985	}
2986
2987	if (rx_ring->sbq_len) {
2988		/*
2989		 * Allocate small buffer queue.
2990		 */
2991		rx_ring->sbq_base =
2992		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2993					 &rx_ring->sbq_base_dma);
2994
2995		if (rx_ring->sbq_base == NULL) {
2996			netif_err(qdev, ifup, qdev->ndev,
2997				  "Small buffer queue allocation failed.\n");
2998			goto err_mem;
2999		}
3000
3001		/*
3002		 * Allocate small buffer queue control blocks.
3003		 */
3004		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3005					     sizeof(struct bq_desc),
3006					     GFP_KERNEL);
3007		if (rx_ring->sbq == NULL)
3008			goto err_mem;
3009
3010		ql_init_sbq_ring(qdev, rx_ring);
3011	}
3012
3013	if (rx_ring->lbq_len) {
3014		/*
3015		 * Allocate large buffer queue.
3016		 */
3017		rx_ring->lbq_base =
3018		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3019					 &rx_ring->lbq_base_dma);
3020
3021		if (rx_ring->lbq_base == NULL) {
3022			netif_err(qdev, ifup, qdev->ndev,
3023				  "Large buffer queue allocation failed.\n");
3024			goto err_mem;
3025		}
3026		/*
3027		 * Allocate large buffer queue control blocks.
3028		 */
3029		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3030					     sizeof(struct bq_desc),
3031					     GFP_KERNEL);
3032		if (rx_ring->lbq == NULL)
3033			goto err_mem;
3034
3035		ql_init_lbq_ring(qdev, rx_ring);
3036	}
3037
3038	return 0;
3039
3040err_mem:
3041	ql_free_rx_resources(qdev, rx_ring);
3042	return -ENOMEM;
3043}
3044
3045static void ql_tx_ring_clean(struct ql_adapter *qdev)
3046{
3047	struct tx_ring *tx_ring;
3048	struct tx_ring_desc *tx_ring_desc;
3049	int i, j;
3050
3051	/*
3052	 * Loop through all queues and free
3053	 * any resources.
3054	 */
3055	for (j = 0; j < qdev->tx_ring_count; j++) {
3056		tx_ring = &qdev->tx_ring[j];
3057		for (i = 0; i < tx_ring->wq_len; i++) {
3058			tx_ring_desc = &tx_ring->q[i];
3059			if (tx_ring_desc && tx_ring_desc->skb) {
3060				netif_err(qdev, ifdown, qdev->ndev,
3061					  "Freeing lost SKB %p, from queue %d, index %d.\n",
3062					  tx_ring_desc->skb, j,
3063					  tx_ring_desc->index);
3064				ql_unmap_send(qdev, tx_ring_desc,
3065					      tx_ring_desc->map_cnt);
3066				dev_kfree_skb(tx_ring_desc->skb);
3067				tx_ring_desc->skb = NULL;
3068			}
3069		}
3070	}
3071}
3072
3073static void ql_free_mem_resources(struct ql_adapter *qdev)
3074{
3075	int i;
3076
3077	for (i = 0; i < qdev->tx_ring_count; i++)
3078		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3079	for (i = 0; i < qdev->rx_ring_count; i++)
3080		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3081	ql_free_shadow_space(qdev);
3082}
3083
3084static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3085{
3086	int i;
3087
3088	/* Allocate space for our shadow registers and such. */
3089	if (ql_alloc_shadow_space(qdev))
3090		return -ENOMEM;
3091
3092	for (i = 0; i < qdev->rx_ring_count; i++) {
3093		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3094			netif_err(qdev, ifup, qdev->ndev,
3095				  "RX resource allocation failed.\n");
3096			goto err_mem;
3097		}
3098	}
3099	/* Allocate tx queue resources */
3100	for (i = 0; i < qdev->tx_ring_count; i++) {
3101		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3102			netif_err(qdev, ifup, qdev->ndev,
3103				  "TX resource allocation failed.\n");
3104			goto err_mem;
3105		}
3106	}
3107	return 0;
3108
3109err_mem:
3110	ql_free_mem_resources(qdev);
3111	return -ENOMEM;
3112}
3113
3114/* Set up the rx ring control block and pass it to the chip.
3115 * The control block is defined as
3116 * "Completion Queue Initialization Control Block", or cqicb.
3117 */
3118static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3119{
3120	struct cqicb *cqicb = &rx_ring->cqicb;
3121	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3122		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3123	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3124		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3125	void __iomem *doorbell_area =
3126	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3127	int err = 0;
3128	u16 bq_len;
3129	u64 tmp;
3130	__le64 *base_indirect_ptr;
3131	int page_entries;
3132
3133	/* Set up the shadow registers for this ring. */
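	/* Within the shadow area, the producer index is followed by the
	 * indirect lists that hand the chip the DMA address of each
	 * DB_PAGE_SIZE chunk of the large and small buffer queues.
	 */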
3134	rx_ring->prod_idx_sh_reg = shadow_reg;
3135	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3136	*rx_ring->prod_idx_sh_reg = 0;
3137	shadow_reg += sizeof(u64);
3138	shadow_reg_dma += sizeof(u64);
3139	rx_ring->lbq_base_indirect = shadow_reg;
3140	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3141	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3142	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3143	rx_ring->sbq_base_indirect = shadow_reg;
3144	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3145
3146	/* PCI doorbell mem area + 0x00 for consumer index register */
3147	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3148	rx_ring->cnsmr_idx = 0;
3149	rx_ring->curr_entry = rx_ring->cq_base;
3150
3151	/* PCI doorbell mem area + 0x04 for valid register */
3152	rx_ring->valid_db_reg = doorbell_area + 0x04;
3153
3154	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3155	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3156
3157	/* PCI doorbell mem area + 0x1c */
3158	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3159
3160	memset((void *)cqicb, 0, sizeof(struct cqicb));
3161	cqicb->msix_vect = rx_ring->irq;
3162
3163	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3164	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3165
3166	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3167
3168	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3169
3170	/*
3171	 * Set up the control block load flags.
3172	 */
3173	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3174	    FLAGS_LV |		/* Load MSI-X vector */
3175	    FLAGS_LI;		/* Load irq delay values */
3176	if (rx_ring->lbq_len) {
3177		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3178		tmp = (u64)rx_ring->lbq_base_dma;
3179		base_indirect_ptr = rx_ring->lbq_base_indirect;
3180		page_entries = 0;
3181		do {
3182			*base_indirect_ptr = cpu_to_le64(tmp);
3183			tmp += DB_PAGE_SIZE;
3184			base_indirect_ptr++;
3185			page_entries++;
3186		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3187		cqicb->lbq_addr =
3188		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3189		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3190			(u16) rx_ring->lbq_buf_size;
3191		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3192		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3193			(u16) rx_ring->lbq_len;
3194		cqicb->lbq_len = cpu_to_le16(bq_len);
3195		rx_ring->lbq_prod_idx = 0;
3196		rx_ring->lbq_curr_idx = 0;
3197		rx_ring->lbq_clean_idx = 0;
3198		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3199	}
3200	if (rx_ring->sbq_len) {
3201		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3202		tmp = (u64)rx_ring->sbq_base_dma;
3203		base_indirect_ptr = rx_ring->sbq_base_indirect;
3204		page_entries = 0;
3205		do {
3206			*base_indirect_ptr = cpu_to_le64(tmp);
3207			tmp += DB_PAGE_SIZE;
3208			base_indirect_ptr++;
3209			page_entries++;
3210		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3211		cqicb->sbq_addr =
3212		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3213		cqicb->sbq_buf_size =
3214		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3215		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3216			(u16) rx_ring->sbq_len;
3217		cqicb->sbq_len = cpu_to_le16(bq_len);
3218		rx_ring->sbq_prod_idx = 0;
3219		rx_ring->sbq_curr_idx = 0;
3220		rx_ring->sbq_clean_idx = 0;
3221		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3222	}
3223	switch (rx_ring->type) {
3224	case TX_Q:
3225		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3226		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3227		break;
3228	case RX_Q:
3229		/* Inbound completion handling rx_rings run in
3230		 * separate NAPI contexts.
3231		 */
3232		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3233			       64);
3234		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3235		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3236		break;
3237	default:
3238		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3239			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3240	}
3241	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3242			   CFG_LCQ, rx_ring->cq_id);
3243	if (err) {
3244		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3245		return err;
3246	}
3247	return err;
3248}
3249
3250static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3251{
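	/* The cast of tx_ring below relies on the wqicb being the first
	 * member of struct tx_ring.
	 */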
3252	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3253	void __iomem *doorbell_area =
3254	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3255	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3256	    (tx_ring->wq_id * sizeof(u64));
3257	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3258	    (tx_ring->wq_id * sizeof(u64));
3259	int err = 0;
3260
3261	/*
3262	 * Assign doorbell registers for this tx_ring.
3263	 */
3264	/* TX PCI doorbell mem area for tx producer index */
3265	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3266	tx_ring->prod_idx = 0;
3267	/* TX PCI doorbell mem area + 0x04 */
3268	tx_ring->valid_db_reg = doorbell_area + 0x04;
3269
3270	/*
3271	 * Assign shadow registers for this tx_ring.
3272	 */
3273	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3274	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3275
3276	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3277	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3278				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3279	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3280	wqicb->rid = 0;
3281	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3282
3283	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3284
3285	ql_init_tx_ring(qdev, tx_ring);
3286
3287	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3288			   (u16) tx_ring->wq_id);
3289	if (err) {
3290		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3291		return err;
3292	}
3293	return err;
3294}
3295
3296static void ql_disable_msix(struct ql_adapter *qdev)
3297{
3298	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3299		pci_disable_msix(qdev->pdev);
3300		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3301		kfree(qdev->msi_x_entry);
3302		qdev->msi_x_entry = NULL;
3303	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3304		pci_disable_msi(qdev->pdev);
3305		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3306	}
3307}
3308
3309/* We start by trying to get the number of vectors
3310 * stored in qdev->intr_count. If we don't get that
3311 * many then we reduce the count and try again.
3312 */
3313static void ql_enable_msix(struct ql_adapter *qdev)
3314{
3315	int i, err;
3316
3317	/* Get the MSIX vectors. */
3318	if (qlge_irq_type == MSIX_IRQ) {
3319		/* Try to alloc space for the msix struct,
3320		 * if it fails then go to MSI/legacy.
3321		 */
3322		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3323					    sizeof(struct msix_entry),
3324					    GFP_KERNEL);
3325		if (!qdev->msi_x_entry) {
3326			qlge_irq_type = MSI_IRQ;
3327			goto msi;
3328		}
3329
3330		for (i = 0; i < qdev->intr_count; i++)
3331			qdev->msi_x_entry[i].entry = i;
3332
3333		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3334					    1, qdev->intr_count);
3335		if (err < 0) {
3336			kfree(qdev->msi_x_entry);
3337			qdev->msi_x_entry = NULL;
3338			netif_warn(qdev, ifup, qdev->ndev,
3339				   "MSI-X Enable failed, trying MSI.\n");
3340			qlge_irq_type = MSI_IRQ;
3341		} else {
3342			qdev->intr_count = err;
3343			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3344			netif_info(qdev, ifup, qdev->ndev,
3345				   "MSI-X Enabled, got %d vectors.\n",
3346				   qdev->intr_count);
3347			return;
3348		}
3349	}
3350msi:
3351	qdev->intr_count = 1;
3352	if (qlge_irq_type == MSI_IRQ) {
3353		if (!pci_enable_msi(qdev->pdev)) {
3354			set_bit(QL_MSI_ENABLED, &qdev->flags);
3355			netif_info(qdev, ifup, qdev->ndev,
3356				   "Running with MSI interrupts.\n");
3357			return;
3358		}
3359	}
3360	qlge_irq_type = LEG_IRQ;
3361	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3362		     "Running with legacy interrupts.\n");
3363}
3364
3365/* Each vector services 1 RSS ring and 1 or more
3366 * TX completion rings.  This function loops through
3367 * the TX completion rings and assigns the vector that
3368 * will service it.  An example would be if there are
3369 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3370 * This would mean that vector 0 would service RSS ring 0
3371 * and TX completion rings 0,1,2 and 3.  Vector 1 would
3372 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3373 */
3374static void ql_set_tx_vect(struct ql_adapter *qdev)
3375{
3376	int i, j, vect;
3377	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3378
3379	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3380		/* Assign irq vectors to TX rx_rings.*/
3381		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3382					 i < qdev->rx_ring_count; i++) {
3383			if (j == tx_rings_per_vector) {
3384				vect++;
3385				j = 0;
3386			}
3387			qdev->rx_ring[i].irq = vect;
3388			j++;
3389		}
3390	} else {
3391		/* For single vector all rings have an irq
3392		 * of zero.
3393		 */
3394		for (i = 0; i < qdev->rx_ring_count; i++)
3395			qdev->rx_ring[i].irq = 0;
3396	}
3397}
3398
3399/* Set the interrupt mask for this vector.  Each vector
3400 * will service 1 RSS ring and 1 or more TX completion
3401 * rings.  This function sets up a bit mask per vector
3402 * that indicates which rings it services.
3403 */
3404static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3405{
3406	int j, vect = ctx->intr;
3407	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3408
3409	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3410		/* Add the RSS ring serviced by this vector
3411		 * to the mask.
3412		 */
3413		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3414		/* Add the TX ring(s) serviced by this vector
3415		 * to the mask. */
3416		for (j = 0; j < tx_rings_per_vector; j++) {
3417			ctx->irq_mask |=
3418			(1 << qdev->rx_ring[qdev->rss_ring_count +
3419			(vect * tx_rings_per_vector) + j].cq_id);
3420		}
3421	} else {
3422		/* For single vector we just shift each queue's
3423		 * ID into the mask.
3424		 */
3425		for (j = 0; j < qdev->rx_ring_count; j++)
3426			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3427	}
3428}
3429
3430/*
3431 * Here we build the intr_context structures based on
3432 * our rx_ring count and intr vector count.
3433 * The intr_context structure is used to hook each vector
3434 * to possibly different handlers.
3435 */
3436static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3437{
3438	int i = 0;
3439	struct intr_context *intr_context = &qdev->intr_context[0];
3440
3441	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3442		/* Each rx_ring has its
3443		 * own intr_context since we have separate
3444		 * vectors for each queue.
3445		 */
3446		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3447			qdev->rx_ring[i].irq = i;
3448			intr_context->intr = i;
3449			intr_context->qdev = qdev;
3450			/* Set up this vector's bit-mask that indicates
3451			 * which queues it services.
3452			 */
3453			ql_set_irq_mask(qdev, intr_context);
3454			/*
3455			 * We set up each vector's enable/disable/read bits so
3456			 * there's no bit/mask calculations in the critical path.
3457			 */
3458			intr_context->intr_en_mask =
3459			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3460			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3461			    | i;
3462			intr_context->intr_dis_mask =
3463			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3464			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3465			    INTR_EN_IHD | i;
3466			intr_context->intr_read_mask =
3467			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3468			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3469			    i;
3470			if (i == 0) {
3471				/* The first vector/queue handles
3472				 * broadcast/multicast, fatal errors,
3473				 * and firmware events.  This in addition
3474				 * to normal inbound NAPI processing.
3475				 */
3476				intr_context->handler = qlge_isr;
3477				sprintf(intr_context->name, "%s-rx-%d",
3478					qdev->ndev->name, i);
3479			} else {
3480				/*
3481				 * Inbound queues handle unicast frames only.
3482				 */
3483				intr_context->handler = qlge_msix_rx_isr;
3484				sprintf(intr_context->name, "%s-rx-%d",
3485					qdev->ndev->name, i);
3486			}
3487		}
3488	} else {
3489		/*
3490		 * All rx_rings use the same intr_context since
3491		 * there is only one vector.
3492		 */
3493		intr_context->intr = 0;
3494		intr_context->qdev = qdev;
3495		/*
3496		 * We set up each vector's enable/disable/read bits so
3497		 * there's no bit/mask calculations in the critical path.
3498		 */
3499		intr_context->intr_en_mask =
3500		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3501		intr_context->intr_dis_mask =
3502		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3503		    INTR_EN_TYPE_DISABLE;
3504		intr_context->intr_read_mask =
3505		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3506		/*
3507		 * Single interrupt means one handler for all rings.
3508		 */
3509		intr_context->handler = qlge_isr;
3510		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3511		/* Set up this vector's bit-mask that indicates
3512		 * which queues it services. In this case there is
3513		 * a single vector so it will service all RSS and
3514		 * TX completion rings.
3515		 */
3516		ql_set_irq_mask(qdev, intr_context);
3517	}
3518	/* Tell the TX completion rings which MSIx vector
3519	 * they will be using.
3520	 */
3521	ql_set_tx_vect(qdev);
3522}
3523
3524static void ql_free_irq(struct ql_adapter *qdev)
3525{
3526	int i;
3527	struct intr_context *intr_context = &qdev->intr_context[0];
3528
3529	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3530		if (intr_context->hooked) {
3531			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3532				free_irq(qdev->msi_x_entry[i].vector,
3533					 &qdev->rx_ring[i]);
3534			} else {
3535				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3536			}
3537		}
3538	}
3539	ql_disable_msix(qdev);
3540}
3541
3542static int ql_request_irq(struct ql_adapter *qdev)
3543{
3544	int i;
3545	int status = 0;
3546	struct pci_dev *pdev = qdev->pdev;
3547	struct intr_context *intr_context = &qdev->intr_context[0];
3548
3549	ql_resolve_queues_to_irqs(qdev);
3550
3551	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3552		atomic_set(&intr_context->irq_cnt, 0);
3553		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3554			status = request_irq(qdev->msi_x_entry[i].vector,
3555					     intr_context->handler,
3556					     0,
3557					     intr_context->name,
3558					     &qdev->rx_ring[i]);
3559			if (status) {
3560				netif_err(qdev, ifup, qdev->ndev,
3561					  "Failed request for MSIX interrupt %d.\n",
3562					  i);
3563				goto err_irq;
3564			}
3565		} else {
3566			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3567				     "trying msi or legacy interrupts.\n");
3568			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3569				     "%s: irq = %d.\n", __func__, pdev->irq);
3570			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3571				     "%s: context->name = %s.\n", __func__,
3572				     intr_context->name);
3573			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3574				     "%s: dev_id = 0x%p.\n", __func__,
3575				     &qdev->rx_ring[0]);
3576			status = request_irq(pdev->irq, qlge_isr,
3577					     test_bit(QL_MSI_ENABLED,
3578						      &qdev->flags) ?
3579					     0 : IRQF_SHARED,
3580					     intr_context->name,
3581					     &qdev->rx_ring[0]);
3582			if (status)
3583				goto err_irq;
3584
3585			netif_err(qdev, ifup, qdev->ndev,
3586				  "Hooked intr %d, queue type %s, with name %s.\n",
3587				  i,
3588				  qdev->rx_ring[0].type == DEFAULT_Q ?
3589				  "DEFAULT_Q" :
3590				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3591				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3592				  intr_context->name);
3593		}
3594		intr_context->hooked = 1;
3595	}
3596	return status;
3597err_irq:
3598	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3599	ql_free_irq(qdev);
3600	return status;
3601}
3602
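/* Build the RSS control block (RICB), i.e. the indirection table plus the
 * hash keys, and download it so inbound flows spread across the RSS rings.
 */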
3603static int ql_start_rss(struct ql_adapter *qdev)
3604{
3605	static const u8 init_hash_seed[] = {
3606		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3607		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3608		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3609		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3610		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3611	};
3612	struct ricb *ricb = &qdev->ricb;
3613	int status = 0;
3614	int i;
3615	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3616
3617	memset((void *)ricb, 0, sizeof(*ricb));
3618
3619	ricb->base_cq = RSS_L4K;
3620	ricb->flags =
3621		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3622	ricb->mask = cpu_to_le16((u16)(0x3ff));
3623
3624	/*
3625	 * Fill out the Indirection Table.
3626	 */
3627	for (i = 0; i < 1024; i++)
3628		hash_id[i] = (i & (qdev->rss_ring_count - 1));
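	/* Note: the AND above spreads hash results evenly only when
	 * rss_ring_count is a power of two; with any other count some
	 * queue indices can never be generated, so those RSS rings
	 * would receive no traffic.
	 */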
3629
3630	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3631	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3632
3633	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3634	if (status) {
3635		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3636		return status;
3637	}
3638	return status;
3639}
3640
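/* Zero all 16 frame-routing slots under the routing-index semaphore. */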
3641static int ql_clear_routing_entries(struct ql_adapter *qdev)
3642{
3643	int i, status = 0;
3644
3645	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3646	if (status)
3647		return status;
3648	/* Clear all the entries in the routing table. */
3649	for (i = 0; i < 16; i++) {
3650		status = ql_set_routing_reg(qdev, i, 0, 0);
3651		if (status) {
3652			netif_err(qdev, ifup, qdev->ndev,
3653				  "Failed to init routing register for CAM packets.\n");
3654			break;
3655		}
3656	}
3657	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3658	return status;
3659}
3660
3661/* Initialize the frame-to-queue routing. */
3662static int ql_route_initialize(struct ql_adapter *qdev)
3663{
3664	int status = 0;
3665
3666	/* Clear all the entries in the routing table. */
3667	status = ql_clear_routing_entries(qdev);
3668	if (status)
3669		return status;
3670
3671	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3672	if (status)
3673		return status;
3674
3675	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3676						RT_IDX_IP_CSUM_ERR, 1);
3677	if (status) {
3678		netif_err(qdev, ifup, qdev->ndev,
3679			"Failed to init routing register "
3680			"for IP CSUM error packets.\n");
3681		goto exit;
3682	}
3683	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3684						RT_IDX_TU_CSUM_ERR, 1);
3685	if (status) {
3686		netif_err(qdev, ifup, qdev->ndev,
3687			"Failed to init routing register "
3688			"for TCP/UDP CSUM error packets.\n");
3689		goto exit;
3690	}
3691	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3692	if (status) {
3693		netif_err(qdev, ifup, qdev->ndev,
3694			  "Failed to init routing register for broadcast packets.\n");
3695		goto exit;
3696	}
3697	/* If we have more than one inbound queue, then turn on RSS in the
3698	 * routing block.
3699	 */
3700	if (qdev->rss_ring_count > 1) {
3701		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3702					RT_IDX_RSS_MATCH, 1);
3703		if (status) {
3704			netif_err(qdev, ifup, qdev->ndev,
3705				  "Failed to init routing register for MATCH RSS packets.\n");
3706			goto exit;
3707		}
3708	}
3709
3710	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3711				    RT_IDX_CAM_HIT, 1);
3712	if (status)
3713		netif_err(qdev, ifup, qdev->ndev,
3714			  "Failed to init routing register for CAM packets.\n");
3715exit:
3716	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3717	return status;
3718}
3719
3720int ql_cam_route_initialize(struct ql_adapter *qdev)
3721{
3722	int status, set;
3723
3724	/* Check if the link is up and use that to
3725	 * determine whether we are setting or clearing
3726	 * the MAC address in the CAM.
3727	 */
3728	set = ql_read32(qdev, STS);
3729	set &= qdev->port_link_up;
3730	status = ql_set_mac_addr(qdev, set);
3731	if (status) {
3732		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3733		return status;
3734	}
3735
3736	status = ql_route_initialize(qdev);
3737	if (status)
3738		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3739
3740	return status;
3741}
3742
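/* Program the global hardware registers, start every rx/tx ring, download
 * the RSS table when more than one RSS ring is in use, set up the CAM and
 * routing filters, then enable NAPI on the RSS rings.
 */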
3743static int ql_adapter_initialize(struct ql_adapter *qdev)
3744{
3745	u32 value, mask;
3746	int i;
3747	int status = 0;
3748
3749	/*
3750	 * Set up the System register to halt on errors.
3751	 */
3752	value = SYS_EFE | SYS_FAE;
3753	mask = value << 16;
3754	ql_write32(qdev, SYS, mask | value);
3755
3756	/* Set the default queue, and VLAN behavior. */
3757	value = NIC_RCV_CFG_DFQ;
3758	mask = NIC_RCV_CFG_DFQ_MASK;
3759	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3760		value |= NIC_RCV_CFG_RV;
3761		mask |= (NIC_RCV_CFG_RV << 16);
3762	}
3763	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3764
3765	/* Set the MPI interrupt to enabled. */
3766	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3767
3768	/* Enable the function, set pagesize, enable error checking. */
3769	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3770	    FSC_EC | FSC_VM_PAGE_4K;
3771	value |= SPLT_SETTING;
3772
3773	/* Set/clear header splitting. */
3774	mask = FSC_VM_PAGESIZE_MASK |
3775	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3776	ql_write32(qdev, FSC, mask | value);
3777
3778	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3779
3780	/* Set RX packet routing to use the port/PCI function on which the
3781	 * packet arrived, in addition to the usual frame routing.
3782	 * This is helpful on bonding where both interfaces can have
3783	 * the same MAC address.
3784	 */
3785	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3786	/* Reroute all packets to our Interface.
3787	 * They may have been routed to MPI firmware
3788	 * due to WOL.
3789	 */
3790	value = ql_read32(qdev, MGMT_RCV_CFG);
3791	value &= ~MGMT_RCV_CFG_RM;
3792	mask = 0xffff0000;
3793
3794	/* Sticky reg needs clearing due to WOL. */
3795	ql_write32(qdev, MGMT_RCV_CFG, mask);
3796	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3797
3798	/* Default WOL is enabled on Mezz cards */
3799	if (qdev->pdev->subsystem_device == 0x0068 ||
3800			qdev->pdev->subsystem_device == 0x0180)
3801		qdev->wol = WAKE_MAGIC;
3802
3803	/* Start up the rx queues. */
3804	for (i = 0; i < qdev->rx_ring_count; i++) {
3805		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3806		if (status) {
3807			netif_err(qdev, ifup, qdev->ndev,
3808				  "Failed to start rx ring[%d].\n", i);
3809			return status;
3810		}
3811	}
3812
3813	/* If there is more than one inbound completion queue
3814	 * then download a RICB to configure RSS.
3815	 */
3816	if (qdev->rss_ring_count > 1) {
3817		status = ql_start_rss(qdev);
3818		if (status) {
3819			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3820			return status;
3821		}
3822	}
3823
3824	/* Start up the tx queues. */
3825	for (i = 0; i < qdev->tx_ring_count; i++) {
3826		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3827		if (status) {
3828			netif_err(qdev, ifup, qdev->ndev,
3829				  "Failed to start tx ring[%d].\n", i);
3830			return status;
3831		}
3832	}
3833
3834	/* Initialize the port and set the max framesize. */
3835	status = qdev->nic_ops->port_initialize(qdev);
3836	if (status)
3837		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3838
3839	/* Set up the MAC address and frame routing filter. */
3840	status = ql_cam_route_initialize(qdev);
3841	if (status) {
3842		netif_err(qdev, ifup, qdev->ndev,
3843			  "Failed to init CAM/Routing tables.\n");
3844		return status;
3845	}
3846
3847	/* Start NAPI for the RSS queues. */
3848	for (i = 0; i < qdev->rss_ring_count; i++)
3849		napi_enable(&qdev->rx_ring[i].napi);
3850
3851	return status;
3852}
3853
3854/* Issue soft reset to chip. */
3855static int ql_adapter_reset(struct ql_adapter *qdev)
3856{
3857	u32 value;
3858	int status = 0;
3859	unsigned long end_jiffies;
3860
3861	/* Clear all the entries in the routing table. */
3862	status = ql_clear_routing_entries(qdev);
3863	if (status) {
3864		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3865		return status;
3866	}
3867
3868	end_jiffies = jiffies +
3869		max((unsigned long)1, usecs_to_jiffies(30));
3870
3871	/* If the recovery bit is set, skip the mailbox command and
3872	 * clear the bit; otherwise we are in the normal reset process.
3873	 */
3874	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3875		/* Stop management traffic. */
3876		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3877
3878		/* Wait for the NIC and MGMNT FIFOs to empty. */
3879		ql_wait_fifo_empty(qdev);
3880	} else
3881		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3882
3883	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3884
3885	do {
3886		value = ql_read32(qdev, RST_FO);
3887		if ((value & RST_FO_FR) == 0)
3888			break;
3889		cpu_relax();
3890	} while (time_before(jiffies, end_jiffies));
3891
3892	if (value & RST_FO_FR) {
3893		netif_err(qdev, ifdown, qdev->ndev,
3894			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3895		status = -ETIMEDOUT;
3896	}
3897
3898	/* Resume management traffic. */
3899	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3900	return status;
3901}
3902
3903static void ql_display_dev_info(struct net_device *ndev)
3904{
3905	struct ql_adapter *qdev = netdev_priv(ndev);
3906
3907	netif_info(qdev, probe, qdev->ndev,
3908		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3909		   "XG Roll = %d, XG Rev = %d.\n",
3910		   qdev->func,
3911		   qdev->port,
3912		   qdev->chip_rev_id & 0x0000000f,
3913		   qdev->chip_rev_id >> 4 & 0x0000000f,
3914		   qdev->chip_rev_id >> 8 & 0x0000000f,
3915		   qdev->chip_rev_id >> 12 & 0x0000000f);
3916	netif_info(qdev, probe, qdev->ndev,
3917		   "MAC address %pM\n", ndev->dev_addr);
3918}
3919
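/* Program Wake-on-LAN in the MPI firmware per the ethtool settings.
 * Only magic-packet wake is supported; anything else is rejected.
 */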
3920static int ql_wol(struct ql_adapter *qdev)
3921{
3922	int status = 0;
3923	u32 wol = MB_WOL_DISABLE;
3924
3925	/* The CAM is still intact after a reset, but if we
3926	 * are doing WOL, then we may need to program the
3927	 * routing regs. We would also need to issue the mailbox
3928	 * commands to instruct the MPI what to do per the ethtool
3929	 * settings.
3930	 */
3931
3932	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3933			WAKE_MCAST | WAKE_BCAST)) {
3934		netif_err(qdev, ifdown, qdev->ndev,
3935			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3936			  qdev->wol);
3937		return -EINVAL;
3938	}
3939
3940	if (qdev->wol & WAKE_MAGIC) {
3941		status = ql_mb_wol_set_magic(qdev, 1);
3942		if (status) {
3943			netif_err(qdev, ifdown, qdev->ndev,
3944				  "Failed to set magic packet on %s.\n",
3945				  qdev->ndev->name);
3946			return status;
3947		} else
3948			netif_info(qdev, drv, qdev->ndev,
3949				   "Enabled magic packet successfully on %s.\n",
3950				   qdev->ndev->name);
3951
3952		wol |= MB_WOL_MAGIC_PKT;
3953	}
3954
3955	if (qdev->wol) {
3956		wol |= MB_WOL_MODE_ON;
3957		status = ql_mb_wol_mode(qdev, wol);
3958		netif_err(qdev, drv, qdev->ndev,
3959			  "WOL %s (wol code 0x%x) on %s\n",
3960			  (status == 0) ? "Successfully set" : "Failed",
3961			  wol, qdev->ndev->name);
3962	}
3963
3964	return status;
3965}
3966
3967static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3968{
3969
3970	/* Don't kill the reset worker thread if we
3971	 * are in the process of recovery.
3972	 */
3973	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3974		cancel_delayed_work_sync(&qdev->asic_reset_work);
3975	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3976	cancel_delayed_work_sync(&qdev->mpi_work);
3977	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3978	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3979	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3980}
3981
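/* Quiesce the interface: stop NAPI and interrupts, drain the tx rings,
 * soft-reset the chip and release the rx buffers.
 */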
3982static int ql_adapter_down(struct ql_adapter *qdev)
3983{
3984	int i, status = 0;
3985
3986	ql_link_off(qdev);
3987
3988	ql_cancel_all_work_sync(qdev);
3989
3990	for (i = 0; i < qdev->rss_ring_count; i++)
3991		napi_disable(&qdev->rx_ring[i].napi);
3992
3993	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3994
3995	ql_disable_interrupts(qdev);
3996
3997	ql_tx_ring_clean(qdev);
3998
3999	/* Call netif_napi_del() from a common point.
4000	 */
4001	for (i = 0; i < qdev->rss_ring_count; i++)
4002		netif_napi_del(&qdev->rx_ring[i].napi);
4003
4004	status = ql_adapter_reset(qdev);
4005	if (status)
4006		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4007			  qdev->func);
4008	ql_free_rx_buffers(qdev);
4009
4010	return status;
4011}
4012
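/* Inverse of ql_adapter_down(): re-initialize the hardware, repost rx
 * buffers, restore the rx mode and VLAN settings and re-enable interrupts.
 */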
4013static int ql_adapter_up(struct ql_adapter *qdev)
4014{
4015	int err = 0;
4016
4017	err = ql_adapter_initialize(qdev);
4018	if (err) {
4019		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4020		goto err_init;
4021	}
4022	set_bit(QL_ADAPTER_UP, &qdev->flags);
4023	ql_alloc_rx_buffers(qdev);
4024	/* If the port is initialized and the
4025	 * link is up, then turn on the carrier.
4026	 */
4027	if ((ql_read32(qdev, STS) & qdev->port_init) &&
4028			(ql_read32(qdev, STS) & qdev->port_link_up))
4029		ql_link_on(qdev);
4030	/* Restore rx mode. */
4031	clear_bit(QL_ALLMULTI, &qdev->flags);
4032	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4033	qlge_set_multicast_list(qdev->ndev);
4034
4035	/* Restore vlan setting. */
4036	qlge_restore_vlan(qdev);
4037
4038	ql_enable_interrupts(qdev);
4039	ql_enable_all_completion_interrupts(qdev);
4040	netif_tx_start_all_queues(qdev->ndev);
4041
4042	return 0;
4043err_init:
4044	ql_adapter_reset(qdev);
4045	return err;
4046}
4047
4048static void ql_release_adapter_resources(struct ql_adapter *qdev)
4049{
4050	ql_free_mem_resources(qdev);
4051	ql_free_irq(qdev);
4052}
4053
4054static int ql_get_adapter_resources(struct ql_adapter *qdev)
4055{
4056	int status = 0;
4057
4058	if (ql_alloc_mem_resources(qdev)) {
4059		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4060		return -ENOMEM;
4061	}
4062	status = ql_request_irq(qdev);
4063	return status;
4064}
4065
4066static int qlge_close(struct net_device *ndev)
4067{
4068	struct ql_adapter *qdev = netdev_priv(ndev);
4069
4070	/* If we hit the pci_channel_io_perm_failure
4071	 * condition, then we have already
4072	 * brought the adapter down.
4073	 */
4074	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4075		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4076		clear_bit(QL_EEH_FATAL, &qdev->flags);
4077		return 0;
4078	}
4079
4080	/*
4081	 * Wait for device to recover from a reset.
4082	 * (Rarely happens, but possible.)
4083	 */
4084	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4085		msleep(1);
4086	ql_adapter_down(qdev);
4087	ql_release_adapter_resources(qdev);
4088	return 0;
4089}
4090
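/* Size the tx/rx/RSS ring sets from the online CPU count and the MSI-X
 * vectors actually granted by ql_enable_msix().
 */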
4091static int ql_configure_rings(struct ql_adapter *qdev)
4092{
4093	int i;
4094	struct rx_ring *rx_ring;
4095	struct tx_ring *tx_ring;
4096	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4097	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4098		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4099
4100	qdev->lbq_buf_order = get_order(lbq_buf_len);
4101
4102	/* In a perfect world we have one RSS ring for each CPU
4103	 * and each has its own vector.  To do that we ask for
4104	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4105	 * vector count to what we actually get.  We then
4106	 * allocate an RSS ring for each.
4107	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4108	 */
4109	qdev->intr_count = cpu_cnt;
4110	ql_enable_msix(qdev);
4111	/* Adjust the RSS ring count to the actual vector count. */
4112	qdev->rss_ring_count = qdev->intr_count;
4113	qdev->tx_ring_count = cpu_cnt;
4114	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4115
4116	for (i = 0; i < qdev->tx_ring_count; i++) {
4117		tx_ring = &qdev->tx_ring[i];
4118		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4119		tx_ring->qdev = qdev;
4120		tx_ring->wq_id = i;
4121		tx_ring->wq_len = qdev->tx_ring_size;
4122		tx_ring->wq_size =
4123		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4124
4125		/*
4126		 * The completion queue IDs for the tx rings start
4127		 * immediately after the rss rings.
4128		 */
4129		tx_ring->cq_id = qdev->rss_ring_count + i;
4130	}
4131
4132	for (i = 0; i < qdev->rx_ring_count; i++) {
4133		rx_ring = &qdev->rx_ring[i];
4134		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4135		rx_ring->qdev = qdev;
4136		rx_ring->cq_id = i;
4137		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4138		if (i < qdev->rss_ring_count) {
4139			/*
4140			 * Inbound (RSS) queues.
4141			 */
4142			rx_ring->cq_len = qdev->rx_ring_size;
4143			rx_ring->cq_size =
4144			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4145			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4146			rx_ring->lbq_size =
4147			    rx_ring->lbq_len * sizeof(__le64);
4148			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4149			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4150			rx_ring->sbq_size =
4151			    rx_ring->sbq_len * sizeof(__le64);
4152			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4153			rx_ring->type = RX_Q;
4154		} else {
4155			/*
4156			 * Outbound queue handles outbound completions only.
4157			 */
4158			/* outbound cq is same size as tx_ring it services. */
4159			rx_ring->cq_len = qdev->tx_ring_size;
4160			rx_ring->cq_size =
4161			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4162			rx_ring->lbq_len = 0;
4163			rx_ring->lbq_size = 0;
4164			rx_ring->lbq_buf_size = 0;
4165			rx_ring->sbq_len = 0;
4166			rx_ring->sbq_size = 0;
4167			rx_ring->sbq_buf_size = 0;
4168			rx_ring->type = TX_Q;
4169		}
4170	}
4171	return 0;
4172}
4173
4174static int qlge_open(struct net_device *ndev)
4175{
4176	int err = 0;
4177	struct ql_adapter *qdev = netdev_priv(ndev);
4178
4179	err = ql_adapter_reset(qdev);
4180	if (err)
4181		return err;
4182
4183	err = ql_configure_rings(qdev);
4184	if (err)
4185		return err;
4186
4187	err = ql_get_adapter_resources(qdev);
4188	if (err)
4189		goto error_up;
4190
4191	err = ql_adapter_up(qdev);
4192	if (err)
4193		goto error_up;
4194
4195	return err;
4196
4197error_up:
4198	ql_release_adapter_resources(qdev);
4199	return err;
4200}
4201
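/* Cycle the adapter down and back up so the large rx buffer size can
 * follow an MTU change; waits for any in-flight reset to finish first.
 */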
4202static int ql_change_rx_buffers(struct ql_adapter *qdev)
4203{
4204	struct rx_ring *rx_ring;
4205	int i, status;
4206	u32 lbq_buf_len;
4207
4208	/* Wait for an outstanding reset to complete. */
4209	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4210		int i = 3;
4211		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4212			netif_err(qdev, ifup, qdev->ndev,
4213				  "Waiting for adapter UP...\n");
4214			ssleep(1);
4215		}
4216
4217		if (!i) {
4218			netif_err(qdev, ifup, qdev->ndev,
4219				  "Timed out waiting for adapter UP\n");
4220			return -ETIMEDOUT;
4221		}
4222	}
4223
4224	status = ql_adapter_down(qdev);
4225	if (status)
4226		goto error;
4227
4228	/* Get the new rx buffer size. */
4229	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4230		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4231	qdev->lbq_buf_order = get_order(lbq_buf_len);
4232
4233	for (i = 0; i < qdev->rss_ring_count; i++) {
4234		rx_ring = &qdev->rx_ring[i];
4235		/* Set the new size. */
4236		rx_ring->lbq_buf_size = lbq_buf_len;
4237	}
4238
4239	status = ql_adapter_up(qdev);
4240	if (status)
4241		goto error;
4242
4243	return status;
4244error:
4245	netif_alert(qdev, ifup, qdev->ndev,
4246		    "Driver up/down cycle failed, closing device.\n");
4247	set_bit(QL_ADAPTER_UP, &qdev->flags);
4248	dev_close(qdev->ndev);
4249	return status;
4250}
4251
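/* Only transitions between the standard (1500) and jumbo (9000) MTU are
 * accepted; a running interface is cycled to re-size its rx buffers.
 */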
4252static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4253{
4254	struct ql_adapter *qdev = netdev_priv(ndev);
4255	int status;
4256
4257	if (ndev->mtu == 1500 && new_mtu == 9000) {
4258		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4259	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4260		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4261	} else
4262		return -EINVAL;
4263
4264	queue_delayed_work(qdev->workqueue,
4265			&qdev->mpi_port_cfg_work, 3*HZ);
4266
4267	ndev->mtu = new_mtu;
4268
4269	if (!netif_running(qdev->ndev)) {
4270		return 0;
4271	}
4272
4273	status = ql_change_rx_buffers(qdev);
4274	if (status) {
4275		netif_err(qdev, ifup, qdev->ndev,
4276			  "Changing MTU failed.\n");
4277	}
4278
4279	return status;
4280}
4281
4282static struct net_device_stats *qlge_get_stats(struct net_device
4283					       *ndev)
4284{
4285	struct ql_adapter *qdev = netdev_priv(ndev);
4286	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4287	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4288	unsigned long pkts, mcast, dropped, errors, bytes;
4289	int i;
4290
4291	/* Get RX stats. */
4292	pkts = mcast = dropped = errors = bytes = 0;
4293	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4294		pkts += rx_ring->rx_packets;
4295		bytes += rx_ring->rx_bytes;
4296		dropped += rx_ring->rx_dropped;
4297		errors += rx_ring->rx_errors;
4298		mcast += rx_ring->rx_multicast;
4299	}
4300	ndev->stats.rx_packets = pkts;
4301	ndev->stats.rx_bytes = bytes;
4302	ndev->stats.rx_dropped = dropped;
4303	ndev->stats.rx_errors = errors;
4304	ndev->stats.multicast = mcast;
4305
4306	/* Get TX stats. */
4307	pkts = errors = bytes = 0;
4308	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4309		pkts += tx_ring->tx_packets;
4310		bytes += tx_ring->tx_bytes;
4311		errors += tx_ring->tx_errors;
4312	}
4313	ndev->stats.tx_packets = pkts;
4314	ndev->stats.tx_bytes = bytes;
4315	ndev->stats.tx_errors = errors;
4316	return &ndev->stats;
4317}
4318
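/* ndo_set_rx_mode handler: program the promiscuous and all-multi routing
 * slots and load the multicast addresses into the MAC address CAM.
 */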
4319static void qlge_set_multicast_list(struct net_device *ndev)
4320{
4321	struct ql_adapter *qdev = netdev_priv(ndev);
4322	struct netdev_hw_addr *ha;
4323	int i, status;
4324
4325	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4326	if (status)
4327		return;
4328	/*
4329	 * Set or clear promiscuous mode if a
4330	 * transition is taking place.
4331	 */
4332	if (ndev->flags & IFF_PROMISC) {
4333		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4334			if (ql_set_routing_reg
4335			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4336				netif_err(qdev, hw, qdev->ndev,
4337					  "Failed to set promiscuous mode.\n");
4338			} else {
4339				set_bit(QL_PROMISCUOUS, &qdev->flags);
4340			}
4341		}
4342	} else {
4343		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4344			if (ql_set_routing_reg
4345			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4346				netif_err(qdev, hw, qdev->ndev,
4347					  "Failed to clear promiscuous mode.\n");
4348			} else {
4349				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4350			}
4351		}
4352	}
4353
4354	/*
4355	 * Set or clear all multicast mode if a
4356	 * transition is taking place.
4357	 */
4358	if ((ndev->flags & IFF_ALLMULTI) ||
4359	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4360		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4361			if (ql_set_routing_reg
4362			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4363				netif_err(qdev, hw, qdev->ndev,
4364					  "Failed to set all-multi mode.\n");
4365			} else {
4366				set_bit(QL_ALLMULTI, &qdev->flags);
4367			}
4368		}
4369	} else {
4370		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4371			if (ql_set_routing_reg
4372			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4373				netif_err(qdev, hw, qdev->ndev,
4374					  "Failed to clear all-multi mode.\n");
4375			} else {
4376				clear_bit(QL_ALLMULTI, &qdev->flags);
4377			}
4378		}
4379	}
4380
4381	if (!netdev_mc_empty(ndev)) {
4382		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4383		if (status)
4384			goto exit;
4385		i = 0;
4386		netdev_for_each_mc_addr(ha, ndev) {
4387			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4388						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4389				netif_err(qdev, hw, qdev->ndev,
4390					  "Failed to load multicast address.\n");
4391				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4392				goto exit;
4393			}
4394			i++;
4395		}
4396		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4397		if (ql_set_routing_reg
4398		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4399			netif_err(qdev, hw, qdev->ndev,
4400				  "Failed to set multicast match mode.\n");
4401		} else {
4402			set_bit(QL_ALLMULTI, &qdev->flags);
4403		}
4404	}
4405exit:
4406	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4407}
4408
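/* ndo_set_mac_address handler: remember the new address and program it
 * into the CAM under the MAC address semaphore.
 */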
4409static int qlge_set_mac_address(struct net_device *ndev, void *p)
4410{
4411	struct ql_adapter *qdev = netdev_priv(ndev);
4412	struct sockaddr *addr = p;
4413	int status;
4414
4415	if (!is_valid_ether_addr(addr->sa_data))
4416		return -EADDRNOTAVAIL;
4417	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4418	/* Update local copy of current mac address. */
4419	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4420
4421	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4422	if (status)
4423		return status;
4424	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4425			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4426	if (status)
4427		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4428	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4429	return status;
4430}
4431
4432static void qlge_tx_timeout(struct net_device *ndev)
4433{
4434	struct ql_adapter *qdev = netdev_priv(ndev);
4435	ql_queue_asic_error(qdev);
4436}
4437
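/* Worker that recovers from an ASIC error by cycling the adapter down and
 * back up under rtnl_lock; the device is closed if the cycle fails.
 */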
4438static void ql_asic_reset_work(struct work_struct *work)
4439{
4440	struct ql_adapter *qdev =
4441	    container_of(work, struct ql_adapter, asic_reset_work.work);
4442	int status;
4443	rtnl_lock();
4444	status = ql_adapter_down(qdev);
4445	if (status)
4446		goto error;
4447
4448	status = ql_adapter_up(qdev);
4449	if (status)
4450		goto error;
4451
4452	/* Restore rx mode. */
4453	clear_bit(QL_ALLMULTI, &qdev->flags);
4454	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4455	qlge_set_multicast_list(qdev->ndev);
4456
4457	rtnl_unlock();
4458	return;
4459error:
4460	netif_alert(qdev, ifup, qdev->ndev,
4461		    "Driver up/down cycle failed, closing device\n");
4462
4463	set_bit(QL_ADAPTER_UP, &qdev->flags);
4464	dev_close(qdev->ndev);
4465	rtnl_unlock();
4466}
4467
4468static const struct nic_operations qla8012_nic_ops = {
4469	.get_flash		= ql_get_8012_flash_params,
4470	.port_initialize	= ql_8012_port_initialize,
4471};
4472
4473static const struct nic_operations qla8000_nic_ops = {
4474	.get_flash		= ql_get_8000_flash_params,
4475	.port_initialize	= ql_8000_port_initialize,
4476};
4477
4478/* Find the pcie function number for the other NIC
4479 * on this chip.  Since both NIC functions share a
4480 * common firmware we have the lowest enabled function
4481 * do any common work.  Examples would be resetting
4482 * after a fatal firmware error, or doing a firmware
4483 * coredump.
4484 */
4485static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4486{
4487	int status = 0;
4488	u32 temp;
4489	u32 nic_func1, nic_func2;
4490
4491	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4492			&temp);
4493	if (status)
4494		return status;
4495
4496	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4497			MPI_TEST_NIC_FUNC_MASK);
4498	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4499			MPI_TEST_NIC_FUNC_MASK);
4500
4501	if (qdev->func == nic_func1)
4502		qdev->alt_func = nic_func2;
4503	else if (qdev->func == nic_func2)
4504		qdev->alt_func = nic_func1;
4505	else
4506		status = -EIO;
4507
4508	return status;
4509}
4510
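/* Work out which PCI function and port this instance is, then select the
 * matching semaphore masks, mailbox addresses and nic_ops for the chip.
 */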
4511static int ql_get_board_info(struct ql_adapter *qdev)
4512{
4513	int status;
4514	qdev->func =
4515	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4516	if (qdev->func > 3)
4517		return -EIO;
4518
4519	status = ql_get_alt_pcie_func(qdev);
4520	if (status)
4521		return status;
4522
4523	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4524	if (qdev->port) {
4525		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4526		qdev->port_link_up = STS_PL1;
4527		qdev->port_init = STS_PI1;
4528		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4529		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4530	} else {
4531		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4532		qdev->port_link_up = STS_PL0;
4533		qdev->port_init = STS_PI0;
4534		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4535		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4536	}
4537	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4538	qdev->device_id = qdev->pdev->device;
4539	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4540		qdev->nic_ops = &qla8012_nic_ops;
4541	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4542		qdev->nic_ops = &qla8000_nic_ops;
4543	return status;
4544}
4545
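/* Release everything acquired by ql_init_device(): the workqueue, the BAR
 * mappings, the optional MPI coredump buffer and the PCI regions.
 */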
4546static void ql_release_all(struct pci_dev *pdev)
4547{
4548	struct net_device *ndev = pci_get_drvdata(pdev);
4549	struct ql_adapter *qdev = netdev_priv(ndev);
4550
4551	if (qdev->workqueue) {
4552		destroy_workqueue(qdev->workqueue);
4553		qdev->workqueue = NULL;
4554	}
4555
4556	if (qdev->reg_base)
4557		iounmap(qdev->reg_base);
4558	if (qdev->doorbell_area)
4559		iounmap(qdev->doorbell_area);
4560	vfree(qdev->mpi_coredump);
4561	pci_release_regions(pdev);
4562}
4563
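/* Probe-time setup: enable the PCI device, configure DMA and the BAR
 * mappings, read board info and initialize the driver's locks, work items
 * and default ring/coalescing parameters.
 */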
4564static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4565			  int cards_found)
4566{
4567	struct ql_adapter *qdev = netdev_priv(ndev);
4568	int err = 0;
4569
4570	memset((void *)qdev, 0, sizeof(*qdev));
4571	err = pci_enable_device(pdev);
4572	if (err) {
4573		dev_err(&pdev->dev, "PCI device enable failed.\n");
4574		return err;
4575	}
4576
4577	qdev->ndev = ndev;
4578	qdev->pdev = pdev;
4579	pci_set_drvdata(pdev, ndev);
4580
4581	/* Set PCIe read request size */
4582	err = pcie_set_readrq(pdev, 4096);
4583	if (err) {
4584		dev_err(&pdev->dev, "Set readrq failed.\n");
4585		goto err_out1;
4586	}
4587
4588	err = pci_request_regions(pdev, DRV_NAME);
4589	if (err) {
4590		dev_err(&pdev->dev, "PCI region request failed.\n");
4591		goto err_out1;
4592	}
4593
4594	pci_set_master(pdev);
4595	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4596		set_bit(QL_DMA64, &qdev->flags);
4597		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4598	} else {
4599		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4600		if (!err)
4601		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4602	}
4603
4604	if (err) {
4605		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4606		goto err_out2;
4607	}
4608
4609	/* Set PCIe reset type for EEH to fundamental. */
4610	pdev->needs_freset = 1;
4611	pci_save_state(pdev);
4612	qdev->reg_base =
4613	    ioremap_nocache(pci_resource_start(pdev, 1),
4614			    pci_resource_len(pdev, 1));
4615	if (!qdev->reg_base) {
4616		dev_err(&pdev->dev, "Register mapping failed.\n");
4617		err = -ENOMEM;
4618		goto err_out2;
4619	}
4620
4621	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4622	qdev->doorbell_area =
4623	    ioremap_nocache(pci_resource_start(pdev, 3),
4624			    pci_resource_len(pdev, 3));
4625	if (!qdev->doorbell_area) {
4626		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4627		err = -ENOMEM;
4628		goto err_out2;
4629	}
4630
4631	err = ql_get_board_info(qdev);
4632	if (err) {
4633		dev_err(&pdev->dev, "Register access failed.\n");
4634		err = -EIO;
4635		goto err_out2;
4636	}
4637	qdev->msg_enable = netif_msg_init(debug, default_msg);
4638	spin_lock_init(&qdev->hw_lock);
4639	spin_lock_init(&qdev->stats_lock);
4640
4641	if (qlge_mpi_coredump) {
4642		qdev->mpi_coredump =
4643			vmalloc(sizeof(struct ql_mpi_coredump));
4644		if (qdev->mpi_coredump == NULL) {
4645			err = -ENOMEM;
4646			goto err_out2;
4647		}
4648		if (qlge_force_coredump)
4649			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4650	}
4651	/* make sure the EEPROM is good */
4652	err = qdev->nic_ops->get_flash(qdev);
4653	if (err) {
4654		dev_err(&pdev->dev, "Invalid FLASH.\n");
4655		goto err_out2;
4656	}
4657
4658	/* Keep local copy of current mac address. */
4659	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4660
4661	/* Set up the default ring sizes. */
4662	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4663	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4664
4665	/* Set up the coalescing parameters. */
4666	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4667	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4668	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4669	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4670
4671	/*
4672	 * Set up the operating parameters.
4673	 */
4674	qdev->workqueue = create_singlethread_workqueue(ndev->name);
4675	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4676	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4677	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4678	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4679	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4680	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4681	init_completion(&qdev->ide_completion);
4682	mutex_init(&qdev->mpi_mutex);
4683
4684	if (!cards_found) {
4685		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4686		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4687			 DRV_NAME, DRV_VERSION);
4688	}
4689	return 0;
4690err_out2:
4691	ql_release_all(pdev);
4692err_out1:
4693	pci_disable_device(pdev);
4694	return err;
4695}
4696
4697static const struct net_device_ops qlge_netdev_ops = {
4698	.ndo_open		= qlge_open,
4699	.ndo_stop		= qlge_close,
4700	.ndo_start_xmit		= qlge_send,
4701	.ndo_change_mtu		= qlge_change_mtu,
4702	.ndo_get_stats		= qlge_get_stats,
4703	.ndo_set_rx_mode	= qlge_set_multicast_list,
4704	.ndo_set_mac_address	= qlge_set_mac_address,
4705	.ndo_validate_addr	= eth_validate_addr,
4706	.ndo_tx_timeout		= qlge_tx_timeout,
4707	.ndo_fix_features	= qlge_fix_features,
4708	.ndo_set_features	= qlge_set_features,
4709	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4710	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4711};
4712
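/* 5-second watchdog that reads a status register so EEH can notice a dead
 * PCI bus; it stops rearming itself once the channel goes offline.
 */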
4713static void ql_timer(unsigned long data)
4714{
4715	struct ql_adapter *qdev = (struct ql_adapter *)data;
4716	u32 var = 0;
4717
4718	var = ql_read32(qdev, STS);
4719	if (pci_channel_offline(qdev->pdev)) {
4720		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4721		return;
4722	}
4723
4724	mod_timer(&qdev->timer, jiffies + (5*HZ));
4725}
4726
4727static int qlge_probe(struct pci_dev *pdev,
4728		      const struct pci_device_id *pci_entry)
4729{
4730	struct net_device *ndev = NULL;
4731	struct ql_adapter *qdev = NULL;
4732	static int cards_found = 0;
4733	int err = 0;
4734
4735	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4736			min(MAX_CPUS, netif_get_num_default_rss_queues()));
4737	if (!ndev)
4738		return -ENOMEM;
4739
4740	err = ql_init_device(pdev, ndev, cards_found);
4741	if (err < 0) {
4742		free_netdev(ndev);
4743		return err;
4744	}
4745
4746	qdev = netdev_priv(ndev);
4747	SET_NETDEV_DEV(ndev, &pdev->dev);
4748	ndev->hw_features = NETIF_F_SG |
4749			    NETIF_F_IP_CSUM |
4750			    NETIF_F_TSO |
4751			    NETIF_F_TSO_ECN |
4752			    NETIF_F_HW_VLAN_CTAG_TX |
4753			    NETIF_F_HW_VLAN_CTAG_RX |
4754			    NETIF_F_HW_VLAN_CTAG_FILTER |
4755			    NETIF_F_RXCSUM;
4756	ndev->features = ndev->hw_features;
4757	ndev->vlan_features = ndev->hw_features;
4758	/* vlan gets same features (except vlan filter) */
4759	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4760				 NETIF_F_HW_VLAN_CTAG_TX |
4761				 NETIF_F_HW_VLAN_CTAG_RX);
4762
4763	if (test_bit(QL_DMA64, &qdev->flags))
4764		ndev->features |= NETIF_F_HIGHDMA;
4765
4766	/*
4767	 * Set up net_device structure.
4768	 */
4769	ndev->tx_queue_len = qdev->tx_ring_size;
4770	ndev->irq = pdev->irq;
4771
4772	ndev->netdev_ops = &qlge_netdev_ops;
4773	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4774	ndev->watchdog_timeo = 10 * HZ;
4775
4776	err = register_netdev(ndev);
4777	if (err) {
4778		dev_err(&pdev->dev, "net device registration failed.\n");
4779		ql_release_all(pdev);
4780		pci_disable_device(pdev);
4781		free_netdev(ndev);
4782		return err;
4783	}
4784	/* Start up the timer to trigger EEH if
4785	 * the bus goes dead
4786	 */
4787	init_timer_deferrable(&qdev->timer);
4788	qdev->timer.data = (unsigned long)qdev;
4789	qdev->timer.function = ql_timer;
4790	qdev->timer.expires = jiffies + (5*HZ);
4791	add_timer(&qdev->timer);
4792	ql_link_off(qdev);
4793	ql_display_dev_info(ndev);
4794	atomic_set(&qdev->lb_count, 0);
4795	cards_found++;
4796	return 0;
4797}
4798
4799netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4800{
4801	return qlge_send(skb, ndev);
4802}
4803
4804int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4805{
4806	return ql_clean_inbound_rx_ring(rx_ring, budget);
4807}
4808
4809static void qlge_remove(struct pci_dev *pdev)
4810{
4811	struct net_device *ndev = pci_get_drvdata(pdev);
4812	struct ql_adapter *qdev = netdev_priv(ndev);
4813	del_timer_sync(&qdev->timer);
4814	ql_cancel_all_work_sync(qdev);
4815	unregister_netdev(ndev);
4816	ql_release_all(pdev);
4817	pci_disable_device(pdev);
4818	free_netdev(ndev);
4819}
4820
4821/* Clean up resources without touching hardware. */
4822static void ql_eeh_close(struct net_device *ndev)
4823{
4824	int i;
4825	struct ql_adapter *qdev = netdev_priv(ndev);
4826
4827	if (netif_carrier_ok(ndev)) {
4828		netif_carrier_off(ndev);
4829		netif_stop_queue(ndev);
4830	}
4831
4832	/* Disabling the timer */
4833	del_timer_sync(&qdev->timer);
4834	ql_cancel_all_work_sync(qdev);
4835
4836	for (i = 0; i < qdev->rss_ring_count; i++)
4837		netif_napi_del(&qdev->rx_ring[i].napi);
4838
4839	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4840	ql_tx_ring_clean(qdev);
4841	ql_free_rx_buffers(qdev);
4842	ql_release_adapter_resources(qdev);
4843}
4844
4845/*
4846 * This callback is called by the PCI subsystem whenever
4847 * a PCI bus error is detected.
4848 */
4849static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4850					       enum pci_channel_state state)
4851{
4852	struct net_device *ndev = pci_get_drvdata(pdev);
4853	struct ql_adapter *qdev = netdev_priv(ndev);
4854
4855	switch (state) {
4856	case pci_channel_io_normal:
4857		return PCI_ERS_RESULT_CAN_RECOVER;
4858	case pci_channel_io_frozen:
4859		netif_device_detach(ndev);
4860		if (netif_running(ndev))
4861			ql_eeh_close(ndev);
4862		pci_disable_device(pdev);
4863		return PCI_ERS_RESULT_NEED_RESET;
4864	case pci_channel_io_perm_failure:
4865		dev_err(&pdev->dev,
4866			"%s: pci_channel_io_perm_failure.\n", __func__);
4867		ql_eeh_close(ndev);
4868		set_bit(QL_EEH_FATAL, &qdev->flags);
4869		return PCI_ERS_RESULT_DISCONNECT;
4870	}
4871
4872	/* Request a slot reset. */
4873	return PCI_ERS_RESULT_NEED_RESET;
4874}
4875
4876/*
4877 * This callback is called after the PCI bus has been reset.
4878 * Basically, this tries to restart the card from scratch.
4879 * This is a shortened version of the device probe/discovery code;
4880 * it resembles the first half of the () routine.
4881 */
4882static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4883{
4884	struct net_device *ndev = pci_get_drvdata(pdev);
4885	struct ql_adapter *qdev = netdev_priv(ndev);
4886
4887	pdev->error_state = pci_channel_io_normal;
4888
4889	pci_restore_state(pdev);
4890	if (pci_enable_device(pdev)) {
4891		netif_err(qdev, ifup, qdev->ndev,
4892			  "Cannot re-enable PCI device after reset.\n");
4893		return PCI_ERS_RESULT_DISCONNECT;
4894	}
4895	pci_set_master(pdev);
4896
4897	if (ql_adapter_reset(qdev)) {
4898		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4899		set_bit(QL_EEH_FATAL, &qdev->flags);
4900		return PCI_ERS_RESULT_DISCONNECT;
4901	}
4902
4903	return PCI_ERS_RESULT_RECOVERED;
4904}
4905
4906static void qlge_io_resume(struct pci_dev *pdev)
4907{
4908	struct net_device *ndev = pci_get_drvdata(pdev);
4909	struct ql_adapter *qdev = netdev_priv(ndev);
4910	int err = 0;
4911
4912	if (netif_running(ndev)) {
4913		err = qlge_open(ndev);
4914		if (err) {
4915			netif_err(qdev, ifup, qdev->ndev,
4916				  "Device initialization failed after reset.\n");
4917			return;
4918		}
4919	} else {
4920		netif_err(qdev, ifup, qdev->ndev,
4921			  "Device was not running prior to EEH.\n");
4922	}
4923	mod_timer(&qdev->timer, jiffies + (5*HZ));
4924	netif_device_attach(ndev);
4925}
4926
4927static const struct pci_error_handlers qlge_err_handler = {
4928	.error_detected = qlge_io_error_detected,
4929	.slot_reset = qlge_io_slot_reset,
4930	.resume = qlge_io_resume,
4931};
4932
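/* Legacy PM suspend hook (also used by qlge_shutdown): bring the adapter
 * down, arm WOL if configured and drop into the requested power state.
 */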
4933static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4934{
4935	struct net_device *ndev = pci_get_drvdata(pdev);
4936	struct ql_adapter *qdev = netdev_priv(ndev);
4937	int err;
4938
4939	netif_device_detach(ndev);
4940	del_timer_sync(&qdev->timer);
4941
4942	if (netif_running(ndev)) {
4943		err = ql_adapter_down(qdev);
4944		if (err)
4945			return err;
4946	}
4947
4948	ql_wol(qdev);
4949	err = pci_save_state(pdev);
4950	if (err)
4951		return err;
4952
4953	pci_disable_device(pdev);
4954
4955	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4956
4957	return 0;
4958}
4959
4960#ifdef CONFIG_PM
4961static int qlge_resume(struct pci_dev *pdev)
4962{
4963	struct net_device *ndev = pci_get_drvdata(pdev);
4964	struct ql_adapter *qdev = netdev_priv(ndev);
4965	int err;
4966
4967	pci_set_power_state(pdev, PCI_D0);
4968	pci_restore_state(pdev);
4969	err = pci_enable_device(pdev);
4970	if (err) {
4971		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4972		return err;
4973	}
4974	pci_set_master(pdev);
4975
4976	pci_enable_wake(pdev, PCI_D3hot, 0);
4977	pci_enable_wake(pdev, PCI_D3cold, 0);
4978
4979	if (netif_running(ndev)) {
4980		err = ql_adapter_up(qdev);
4981		if (err)
4982			return err;
4983	}
4984
4985	mod_timer(&qdev->timer, jiffies + (5*HZ));
4986	netif_device_attach(ndev);
4987
4988	return 0;
4989}
4990#endif /* CONFIG_PM */
4991
4992static void qlge_shutdown(struct pci_dev *pdev)
4993{
4994	qlge_suspend(pdev, PMSG_SUSPEND);
4995}
4996
4997static struct pci_driver qlge_driver = {
4998	.name = DRV_NAME,
4999	.id_table = qlge_pci_tbl,
5000	.probe = qlge_probe,
5001	.remove = qlge_remove,
5002#ifdef CONFIG_PM
5003	.suspend = qlge_suspend,
5004	.resume = qlge_resume,
5005#endif
5006	.shutdown = qlge_shutdown,
5007	.err_handler = &qlge_err_handler
5008};
5009
5010module_pci_driver(qlge_driver);