   1/* bnx2.c: Broadcom NX2 network driver.
   2 *
   3 * Copyright (c) 2004-2011 Broadcom Corporation
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License as published by
   7 * the Free Software Foundation.
   8 *
   9 * Written by: Michael Chan  (mchan@broadcom.com)
  10 */
  11
  12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16
  17#include <linux/kernel.h>
  18#include <linux/timer.h>
  19#include <linux/errno.h>
  20#include <linux/ioport.h>
  21#include <linux/slab.h>
  22#include <linux/vmalloc.h>
  23#include <linux/interrupt.h>
  24#include <linux/pci.h>
  25#include <linux/init.h>
  26#include <linux/netdevice.h>
  27#include <linux/etherdevice.h>
  28#include <linux/skbuff.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/bitops.h>
  31#include <asm/io.h>
  32#include <asm/irq.h>
  33#include <linux/delay.h>
  34#include <asm/byteorder.h>
  35#include <asm/page.h>
  36#include <linux/time.h>
  37#include <linux/ethtool.h>
  38#include <linux/mii.h>
  39#include <linux/if_vlan.h>
  40#include <net/ip.h>
  41#include <net/tcp.h>
  42#include <net/checksum.h>
  43#include <linux/workqueue.h>
  44#include <linux/crc32.h>
  45#include <linux/prefetch.h>
  46#include <linux/cache.h>
  47#include <linux/firmware.h>
  48#include <linux/log2.h>
  49#include <linux/aer.h>
  50
  51#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
  52#define BCM_CNIC 1
  53#include "cnic_if.h"
  54#endif
  55#include "bnx2.h"
  56#include "bnx2_fw.h"
  57
  58#define DRV_MODULE_NAME		"bnx2"
  59#define DRV_MODULE_VERSION	"2.1.11"
  60#define DRV_MODULE_RELDATE	"July 20, 2011"
  61#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
  62#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
  63#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
  64#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
  65#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
  66
  67#define RUN_AT(x) (jiffies + (x))
  68
  69/* Time in jiffies before concluding the transmitter is hung. */
  70#define TX_TIMEOUT  (5*HZ)
  71
  72static char version[] __devinitdata =
  73	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  74
  75MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
  76MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
  77MODULE_LICENSE("GPL");
  78MODULE_VERSION(DRV_MODULE_VERSION);
  79MODULE_FIRMWARE(FW_MIPS_FILE_06);
  80MODULE_FIRMWARE(FW_RV2P_FILE_06);
  81MODULE_FIRMWARE(FW_MIPS_FILE_09);
  82MODULE_FIRMWARE(FW_RV2P_FILE_09);
  83MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
  84
  85static int disable_msi = 0;
  86
  87module_param(disable_msi, int, 0);
  88MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  89
  90typedef enum {
  91	BCM5706 = 0,
  92	NC370T,
  93	NC370I,
  94	BCM5706S,
  95	NC370F,
  96	BCM5708,
  97	BCM5708S,
  98	BCM5709,
  99	BCM5709S,
 100	BCM5716,
 101	BCM5716S,
 102} board_t;
 103
 104/* indexed by board_t, above */
 105static struct {
 106	char *name;
 107} board_info[] __devinitdata = {
 108	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
 109	{ "HP NC370T Multifunction Gigabit Server Adapter" },
 110	{ "HP NC370i Multifunction Gigabit Server Adapter" },
 111	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
 112	{ "HP NC370F Multifunction Gigabit Server Adapter" },
 113	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
 114	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
 115	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
 116	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
 117	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
 118	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
  119};
 120
 121static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
 122	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 123	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
 124	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 125	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
 126	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 127	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
 128	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
 129	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
 130	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
 131	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
 132	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
 133	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
 134	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
 135	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
 136	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
 137	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
 138	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
 139	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
 140	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
 141	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
 142	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
 143	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
 144	{ 0, }
 145};
 146
 147static const struct flash_spec flash_table[] =
 148{
 149#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
 150#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
 151	/* Slow EEPROM */
 152	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
 153	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 154	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 155	 "EEPROM - slow"},
 156	/* Expansion entry 0001 */
 157	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
 158	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 159	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 160	 "Entry 0001"},
 161	/* Saifun SA25F010 (non-buffered flash) */
 162	/* strap, cfg1, & write1 need updates */
 163	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
 164	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 165	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
 166	 "Non-buffered flash (128kB)"},
 167	/* Saifun SA25F020 (non-buffered flash) */
 168	/* strap, cfg1, & write1 need updates */
 169	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
 170	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 171	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
 172	 "Non-buffered flash (256kB)"},
 173	/* Expansion entry 0100 */
 174	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
 175	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 176	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 177	 "Entry 0100"},
 178	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
 179	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
 180	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 181	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 182	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
 183	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
 184	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
 185	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 186	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 187	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
 188	/* Saifun SA25F005 (non-buffered flash) */
 189	/* strap, cfg1, & write1 need updates */
 190	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
 191	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 192	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
 193	 "Non-buffered flash (64kB)"},
 194	/* Fast EEPROM */
 195	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
 196	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 197	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 198	 "EEPROM - fast"},
 199	/* Expansion entry 1001 */
 200	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
 201	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 202	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 203	 "Entry 1001"},
 204	/* Expansion entry 1010 */
 205	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
 206	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 207	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 208	 "Entry 1010"},
 209	/* ATMEL AT45DB011B (buffered flash) */
 210	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
 211	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 212	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
 213	 "Buffered flash (128kB)"},
 214	/* Expansion entry 1100 */
 215	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
 216	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 217	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 218	 "Entry 1100"},
 219	/* Expansion entry 1101 */
 220	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
 221	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 222	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 223	 "Entry 1101"},
  224	/* Atmel Expansion entry 1110 */
 225	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
 226	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 227	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
 228	 "Entry 1110 (Atmel)"},
 229	/* ATMEL AT45DB021B (buffered flash) */
 230	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
 231	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 232	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
 233	 "Buffered flash (256kB)"},
 234};
 235
 236static const struct flash_spec flash_5709 = {
 237	.flags		= BNX2_NV_BUFFERED,
 238	.page_bits	= BCM5709_FLASH_PAGE_BITS,
 239	.page_size	= BCM5709_FLASH_PAGE_SIZE,
 240	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
 241	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
 242	.name		= "5709 Buffered flash (256kB)",
 243};
 244
 245MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 246
 247static void bnx2_init_napi(struct bnx2 *bp);
 248static void bnx2_del_napi(struct bnx2 *bp);
 249
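/* Note on the TX ring accounting below: barrier() forces tx_prod/tx_cons
 * to be re-read from memory, and the u16 indices wrap naturally.  Worked
 * example (illustrative values only): tx_prod = 5, tx_cons = 65530 gives
 * diff = (5 - 65530) & 0xffff = 11 BDs in flight, so tx_ring_size - 11
 * slots remain.  The diff == TX_DESC_CNT special case exists because each
 * page of 256 indices describes only 255 usable descriptors; the skipped
 * entry holds the pointer to the next BD page.
 */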
 250static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 251{
 252	u32 diff;
 253
 254	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
 255	barrier();
 256
 257	/* The ring uses 256 indices for 255 entries, one of them
 258	 * needs to be skipped.
 259	 */
 260	diff = txr->tx_prod - txr->tx_cons;
 261	if (unlikely(diff >= TX_DESC_CNT)) {
 262		diff &= 0xffff;
 263		if (diff == TX_DESC_CNT)
 264			diff = MAX_TX_DESC_CNT;
 265	}
 266	return bp->tx_ring_size - diff;
 267}
 268
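/* Indirect register access: BNX2_PCICFG_REG_WINDOW_ADDRESS selects the
 * target offset and BNX2_PCICFG_REG_WINDOW moves the data, so
 * indirect_lock must serialize the address/data pair against any other
 * context using the same window.  The shmem helpers below are the same
 * mechanism rebased at shmem_base.
 */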
 269static u32
 270bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 271{
 272	u32 val;
 273
 274	spin_lock_bh(&bp->indirect_lock);
 275	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 276	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
 277	spin_unlock_bh(&bp->indirect_lock);
 278	return val;
 279}
 280
 281static void
 282bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
 283{
 284	spin_lock_bh(&bp->indirect_lock);
 285	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 286	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
 287	spin_unlock_bh(&bp->indirect_lock);
 288}
 289
 290static void
 291bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
 292{
 293	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
 294}
 295
 296static u32
 297bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
 298{
 299	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
 300}
 301
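/* Context memory writes are chip-specific: the 5709 posts the value
 * through BNX2_CTX_CTX_DATA/BNX2_CTX_CTX_CTRL and polls the WRITE_REQ bit
 * (up to 5 tries, 5 us apart) until the hardware commits it; older chips
 * use the plain BNX2_CTX_DATA_ADR/BNX2_CTX_DATA address/data pair.
 */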
 302static void
 303bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 304{
 305	offset += cid_addr;
 306	spin_lock_bh(&bp->indirect_lock);
 307	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 308		int i;
 309
 310		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
 311		REG_WR(bp, BNX2_CTX_CTX_CTRL,
 312		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
 313		for (i = 0; i < 5; i++) {
 314			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
 315			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
 316				break;
 317			udelay(5);
 318		}
 319	} else {
 320		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
 321		REG_WR(bp, BNX2_CTX_DATA, val);
 322	}
 323	spin_unlock_bh(&bp->indirect_lock);
 324}
 325
 326#ifdef BCM_CNIC
 327static int
 328bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
 329{
 330	struct bnx2 *bp = netdev_priv(dev);
 331	struct drv_ctl_io *io = &info->data.io;
 332
 333	switch (info->cmd) {
 334	case DRV_CTL_IO_WR_CMD:
 335		bnx2_reg_wr_ind(bp, io->offset, io->data);
 336		break;
 337	case DRV_CTL_IO_RD_CMD:
 338		io->data = bnx2_reg_rd_ind(bp, io->offset);
 339		break;
 340	case DRV_CTL_CTX_WR_CMD:
 341		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
 342		break;
 343	default:
 344		return -EINVAL;
 345	}
 346	return 0;
 347}
 348
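/* CNIC (iSCSI offload) glue: cnic_ops is published with
 * rcu_assign_pointer() in bnx2_register_cnic() so fast paths can
 * dereference it locklessly, and bnx2_unregister_cnic() clears it and
 * calls synchronize_rcu() before the ops structure may be freed.  The
 * IRQ info handed over below points the CNIC driver at its own status
 * block slice and vector.
 */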
 349static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
 350{
 351	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 352	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 353	int sb_id;
 354
 355	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 356		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 357		bnapi->cnic_present = 0;
 358		sb_id = bp->irq_nvecs;
 359		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 360	} else {
 361		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 362		bnapi->cnic_tag = bnapi->last_status_idx;
 363		bnapi->cnic_present = 1;
 364		sb_id = 0;
 365		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 366	}
 367
 368	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
 369	cp->irq_arr[0].status_blk = (void *)
 370		((unsigned long) bnapi->status_blk.msi +
 371		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
 372	cp->irq_arr[0].status_blk_num = sb_id;
 373	cp->num_irq = 1;
 374}
 375
 376static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 377			      void *data)
 378{
 379	struct bnx2 *bp = netdev_priv(dev);
 380	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 381
 382	if (ops == NULL)
 383		return -EINVAL;
 384
 385	if (cp->drv_state & CNIC_DRV_STATE_REGD)
 386		return -EBUSY;
 387
 388	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
 389		return -ENODEV;
 390
 391	bp->cnic_data = data;
 392	rcu_assign_pointer(bp->cnic_ops, ops);
 393
 394	cp->num_irq = 0;
 395	cp->drv_state = CNIC_DRV_STATE_REGD;
 396
 397	bnx2_setup_cnic_irq_info(bp);
 398
 399	return 0;
 400}
 401
 402static int bnx2_unregister_cnic(struct net_device *dev)
 403{
 404	struct bnx2 *bp = netdev_priv(dev);
 405	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 406	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 407
 408	mutex_lock(&bp->cnic_lock);
 409	cp->drv_state = 0;
 410	bnapi->cnic_present = 0;
 411	rcu_assign_pointer(bp->cnic_ops, NULL);
 412	mutex_unlock(&bp->cnic_lock);
 413	synchronize_rcu();
 414	return 0;
 415}
 416
 417struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
 418{
 419	struct bnx2 *bp = netdev_priv(dev);
 420	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 421
 422	if (!cp->max_iscsi_conn)
 423		return NULL;
 424
 425	cp->drv_owner = THIS_MODULE;
 426	cp->chip_id = bp->chip_id;
 427	cp->pdev = bp->pdev;
 428	cp->io_base = bp->regview;
 429	cp->drv_ctl = bnx2_drv_ctl;
 430	cp->drv_register_cnic = bnx2_register_cnic;
 431	cp->drv_unregister_cnic = bnx2_unregister_cnic;
 432
 433	return cp;
 434}
 435EXPORT_SYMBOL(bnx2_cnic_probe);
 436
 437static void
 438bnx2_cnic_stop(struct bnx2 *bp)
 439{
 440	struct cnic_ops *c_ops;
 441	struct cnic_ctl_info info;
 442
 443	mutex_lock(&bp->cnic_lock);
 444	c_ops = rcu_dereference_protected(bp->cnic_ops,
 445					  lockdep_is_held(&bp->cnic_lock));
 446	if (c_ops) {
 447		info.cmd = CNIC_CTL_STOP_CMD;
 448		c_ops->cnic_ctl(bp->cnic_data, &info);
 449	}
 450	mutex_unlock(&bp->cnic_lock);
 451}
 452
 453static void
 454bnx2_cnic_start(struct bnx2 *bp)
 455{
 456	struct cnic_ops *c_ops;
 457	struct cnic_ctl_info info;
 458
 459	mutex_lock(&bp->cnic_lock);
 460	c_ops = rcu_dereference_protected(bp->cnic_ops,
 461					  lockdep_is_held(&bp->cnic_lock));
 462	if (c_ops) {
 463		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 464			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 465
 466			bnapi->cnic_tag = bnapi->last_status_idx;
 467		}
 468		info.cmd = CNIC_CTL_START_CMD;
 469		c_ops->cnic_ctl(bp->cnic_data, &info);
 470	}
 471	mutex_unlock(&bp->cnic_lock);
 472}
 473
 474#else
 475
 476static void
 477bnx2_cnic_stop(struct bnx2 *bp)
 478{
 479}
 480
 481static void
 482bnx2_cnic_start(struct bnx2 *bp)
 483{
 484}
 485
 486#endif
 487
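/* MII management: bnx2_read_phy()/bnx2_write_phy() build a clause-22
 * style MDIO frame in BNX2_EMAC_MDIO_COMM -- PHY address in bits 25:21,
 * register in bits 20:16, data in the low 16 bits -- and poll START_BUSY
 * for up to 50 * 10 us.  Hardware auto-polling of the PHY is parked
 * around each access so the manual transaction cannot collide with it.
 */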
 488static int
 489bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 490{
 491	u32 val1;
 492	int i, ret;
 493
 494	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 495		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 496		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 497
 498		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 499		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 500
 501		udelay(40);
 502	}
 503
 504	val1 = (bp->phy_addr << 21) | (reg << 16) |
 505		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
 506		BNX2_EMAC_MDIO_COMM_START_BUSY;
 507	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 508
 509	for (i = 0; i < 50; i++) {
 510		udelay(10);
 511
 512		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
 513		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 514			udelay(5);
 515
 516			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
 517			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
 518
 519			break;
 520		}
 521	}
 522
 523	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
 524		*val = 0x0;
 525		ret = -EBUSY;
 526	}
 527	else {
 528		*val = val1;
 529		ret = 0;
 530	}
 531
 532	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 533		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 534		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 535
 536		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 537		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 538
 539		udelay(40);
 540	}
 541
 542	return ret;
 543}
 544
 545static int
 546bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 547{
 548	u32 val1;
 549	int i, ret;
 550
 551	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 552		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 553		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 554
 555		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 556		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 557
 558		udelay(40);
 559	}
 560
 561	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
 562		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
 563		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
 564	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 565
 566	for (i = 0; i < 50; i++) {
 567		udelay(10);
 568
 569		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
 570		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 571			udelay(5);
 572			break;
 573		}
 574	}
 575
 576	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
  577		ret = -EBUSY;
 578	else
 579		ret = 0;
 580
 581	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 582		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 583		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 584
 585		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 586		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
 587
 588		udelay(40);
 589	}
 590
 591	return ret;
 592}
 593
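/* Interrupt gating goes through BNX2_PCICFG_INT_ACK_CMD: disable writes
 * MASK_INT for every vector and the trailing register read flushes the
 * posted writes; enable re-arms each vector by acking last_status_idx
 * (once with MASK_INT set, once cleared) and then sets COAL_NOW so any
 * pending status block is delivered immediately.
 */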
 594static void
 595bnx2_disable_int(struct bnx2 *bp)
 596{
 597	int i;
 598	struct bnx2_napi *bnapi;
 599
 600	for (i = 0; i < bp->irq_nvecs; i++) {
 601		bnapi = &bp->bnx2_napi[i];
 602		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 603		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 604	}
 605	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 606}
 607
 608static void
 609bnx2_enable_int(struct bnx2 *bp)
 610{
 611	int i;
 612	struct bnx2_napi *bnapi;
 613
 614	for (i = 0; i < bp->irq_nvecs; i++) {
 615		bnapi = &bp->bnx2_napi[i];
 616
 617		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 618		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 619		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 620		       bnapi->last_status_idx);
 621
 622		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 623		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 624		       bnapi->last_status_idx);
 625	}
 626	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 627}
 628
 629static void
 630bnx2_disable_int_sync(struct bnx2 *bp)
 631{
 632	int i;
 633
 634	atomic_inc(&bp->intr_sem);
 635	if (!netif_running(bp->dev))
 636		return;
 637
 638	bnx2_disable_int(bp);
 639	for (i = 0; i < bp->irq_nvecs; i++)
 640		synchronize_irq(bp->irq_tbl[i].vector);
 641}
 642
 643static void
 644bnx2_napi_disable(struct bnx2 *bp)
 645{
 646	int i;
 647
 648	for (i = 0; i < bp->irq_nvecs; i++)
 649		napi_disable(&bp->bnx2_napi[i].napi);
 650}
 651
 652static void
 653bnx2_napi_enable(struct bnx2 *bp)
 654{
 655	int i;
 656
 657	for (i = 0; i < bp->irq_nvecs; i++)
 658		napi_enable(&bp->bnx2_napi[i].napi);
 659}
 660
 661static void
 662bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 663{
 664	if (stop_cnic)
 665		bnx2_cnic_stop(bp);
 666	if (netif_running(bp->dev)) {
 667		bnx2_napi_disable(bp);
 668		netif_tx_disable(bp->dev);
 669	}
 670	bnx2_disable_int_sync(bp);
 671	netif_carrier_off(bp->dev);	/* prevent tx timeout */
 672}
 673
 674static void
 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 676{
 677	if (atomic_dec_and_test(&bp->intr_sem)) {
 678		if (netif_running(bp->dev)) {
 679			netif_tx_wake_all_queues(bp->dev);
 680			spin_lock_bh(&bp->phy_lock);
 681			if (bp->link_up)
 682				netif_carrier_on(bp->dev);
 683			spin_unlock_bh(&bp->phy_lock);
 684			bnx2_napi_enable(bp);
 685			bnx2_enable_int(bp);
 686			if (start_cnic)
 687				bnx2_cnic_start(bp);
 688		}
 689	}
 690}
 691
 692static void
 693bnx2_free_tx_mem(struct bnx2 *bp)
 694{
 695	int i;
 696
 697	for (i = 0; i < bp->num_tx_rings; i++) {
 698		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 699		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 700
 701		if (txr->tx_desc_ring) {
 702			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 703					  txr->tx_desc_ring,
 704					  txr->tx_desc_mapping);
 705			txr->tx_desc_ring = NULL;
 706		}
 707		kfree(txr->tx_buf_ring);
 708		txr->tx_buf_ring = NULL;
 709	}
 710}
 711
 712static void
 713bnx2_free_rx_mem(struct bnx2 *bp)
 714{
 715	int i;
 716
 717	for (i = 0; i < bp->num_rx_rings; i++) {
 718		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 719		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 720		int j;
 721
 722		for (j = 0; j < bp->rx_max_ring; j++) {
 723			if (rxr->rx_desc_ring[j])
 724				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 725						  rxr->rx_desc_ring[j],
 726						  rxr->rx_desc_mapping[j]);
 727			rxr->rx_desc_ring[j] = NULL;
 728		}
 729		vfree(rxr->rx_buf_ring);
 730		rxr->rx_buf_ring = NULL;
 731
 732		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 733			if (rxr->rx_pg_desc_ring[j])
 734				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 735						  rxr->rx_pg_desc_ring[j],
 736						  rxr->rx_pg_desc_mapping[j]);
 737			rxr->rx_pg_desc_ring[j] = NULL;
 738		}
 739		vfree(rxr->rx_pg_ring);
 740		rxr->rx_pg_ring = NULL;
 741	}
 742}
 743
 744static int
 745bnx2_alloc_tx_mem(struct bnx2 *bp)
 746{
 747	int i;
 748
 749	for (i = 0; i < bp->num_tx_rings; i++) {
 750		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 751		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 752
 753		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 754		if (txr->tx_buf_ring == NULL)
 755			return -ENOMEM;
 756
 757		txr->tx_desc_ring =
 758			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 759					   &txr->tx_desc_mapping, GFP_KERNEL);
 760		if (txr->tx_desc_ring == NULL)
 761			return -ENOMEM;
 762	}
 763	return 0;
 764}
 765
 766static int
 767bnx2_alloc_rx_mem(struct bnx2 *bp)
 768{
 769	int i;
 770
 771	for (i = 0; i < bp->num_rx_rings; i++) {
 772		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 773		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 774		int j;
 775
 776		rxr->rx_buf_ring =
 777			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 778		if (rxr->rx_buf_ring == NULL)
 779			return -ENOMEM;
 780
 781		for (j = 0; j < bp->rx_max_ring; j++) {
 782			rxr->rx_desc_ring[j] =
 783				dma_alloc_coherent(&bp->pdev->dev,
 784						   RXBD_RING_SIZE,
 785						   &rxr->rx_desc_mapping[j],
 786						   GFP_KERNEL);
 787			if (rxr->rx_desc_ring[j] == NULL)
 788				return -ENOMEM;
 789
 790		}
 791
 792		if (bp->rx_pg_ring_size) {
 793			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
 794						  bp->rx_max_pg_ring);
 795			if (rxr->rx_pg_ring == NULL)
 796				return -ENOMEM;
 797
 798		}
 799
 800		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 801			rxr->rx_pg_desc_ring[j] =
 802				dma_alloc_coherent(&bp->pdev->dev,
 803						   RXBD_RING_SIZE,
 804						   &rxr->rx_pg_desc_mapping[j],
 805						   GFP_KERNEL);
 806			if (rxr->rx_pg_desc_ring[j] == NULL)
 807				return -ENOMEM;
 808
 809		}
 810	}
 811	return 0;
 812}
 813
 814static void
 815bnx2_free_mem(struct bnx2 *bp)
 816{
 817	int i;
 818	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 819
 820	bnx2_free_tx_mem(bp);
 821	bnx2_free_rx_mem(bp);
 822
 823	for (i = 0; i < bp->ctx_pages; i++) {
 824		if (bp->ctx_blk[i]) {
 825			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
 826					  bp->ctx_blk[i],
 827					  bp->ctx_blk_mapping[i]);
 828			bp->ctx_blk[i] = NULL;
 829		}
 830	}
 831	if (bnapi->status_blk.msi) {
 832		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
 833				  bnapi->status_blk.msi,
 834				  bp->status_blk_mapping);
 835		bnapi->status_blk.msi = NULL;
 836		bp->stats_blk = NULL;
 837	}
 838}
 839
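/* bnx2_alloc_mem() places the status and statistics blocks in a single
 * coherent allocation.  Rough layout (MSI-X case):
 *
 *   offset 0                              vector 0 status block
 *   offset i * BNX2_SBLK_MSIX_ALIGN_SIZE  vector i status block
 *   offset status_blk_size                struct statistics_block
 *
 * so one DMA mapping covers every vector plus the stats block.
 */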
 840static int
 841bnx2_alloc_mem(struct bnx2 *bp)
 842{
 843	int i, status_blk_size, err;
 844	struct bnx2_napi *bnapi;
 845	void *status_blk;
 846
 847	/* Combine status and statistics blocks into one allocation. */
 848	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
 849	if (bp->flags & BNX2_FLAG_MSIX_CAP)
 850		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
 851						 BNX2_SBLK_MSIX_ALIGN_SIZE);
 852	bp->status_stats_size = status_blk_size +
 853				sizeof(struct statistics_block);
 854
 855	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
 856					&bp->status_blk_mapping, GFP_KERNEL);
 857	if (status_blk == NULL)
 858		goto alloc_mem_err;
 859
 860	memset(status_blk, 0, bp->status_stats_size);
 861
 862	bnapi = &bp->bnx2_napi[0];
 863	bnapi->status_blk.msi = status_blk;
 864	bnapi->hw_tx_cons_ptr =
 865		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
 866	bnapi->hw_rx_cons_ptr =
 867		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
 868	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
 869		for (i = 1; i < bp->irq_nvecs; i++) {
 870			struct status_block_msix *sblk;
 871
 872			bnapi = &bp->bnx2_napi[i];
 873
 874			sblk = (void *) (status_blk +
 875					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
 876			bnapi->status_blk.msix = sblk;
 877			bnapi->hw_tx_cons_ptr =
 878				&sblk->status_tx_quick_consumer_index;
 879			bnapi->hw_rx_cons_ptr =
 880				&sblk->status_rx_quick_consumer_index;
 881			bnapi->int_num = i << 24;
 882		}
 883	}
 884
 885	bp->stats_blk = status_blk + status_blk_size;
 886
 887	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 888
 889	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
 890		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
 891		if (bp->ctx_pages == 0)
 892			bp->ctx_pages = 1;
 893		for (i = 0; i < bp->ctx_pages; i++) {
 894			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
 895						BCM_PAGE_SIZE,
 896						&bp->ctx_blk_mapping[i],
 897						GFP_KERNEL);
 898			if (bp->ctx_blk[i] == NULL)
 899				goto alloc_mem_err;
 900		}
 901	}
 902
 903	err = bnx2_alloc_rx_mem(bp);
 904	if (err)
 905		goto alloc_mem_err;
 906
 907	err = bnx2_alloc_tx_mem(bp);
 908	if (err)
 909		goto alloc_mem_err;
 910
 911	return 0;
 912
 913alloc_mem_err:
 914	bnx2_free_mem(bp);
 915	return -ENOMEM;
 916}
 917
 918static void
 919bnx2_report_fw_link(struct bnx2 *bp)
 920{
 921	u32 fw_link_status = 0;
 922
 923	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 924		return;
 925
 926	if (bp->link_up) {
 927		u32 bmsr;
 928
 929		switch (bp->line_speed) {
 930		case SPEED_10:
 931			if (bp->duplex == DUPLEX_HALF)
 932				fw_link_status = BNX2_LINK_STATUS_10HALF;
 933			else
 934				fw_link_status = BNX2_LINK_STATUS_10FULL;
 935			break;
 936		case SPEED_100:
 937			if (bp->duplex == DUPLEX_HALF)
 938				fw_link_status = BNX2_LINK_STATUS_100HALF;
 939			else
 940				fw_link_status = BNX2_LINK_STATUS_100FULL;
 941			break;
 942		case SPEED_1000:
 943			if (bp->duplex == DUPLEX_HALF)
 944				fw_link_status = BNX2_LINK_STATUS_1000HALF;
 945			else
 946				fw_link_status = BNX2_LINK_STATUS_1000FULL;
 947			break;
 948		case SPEED_2500:
 949			if (bp->duplex == DUPLEX_HALF)
 950				fw_link_status = BNX2_LINK_STATUS_2500HALF;
 951			else
 952				fw_link_status = BNX2_LINK_STATUS_2500FULL;
 953			break;
 954		}
 955
 956		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
 957
 958		if (bp->autoneg) {
 959			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
 960
 961			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 962			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 963
 964			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
 965			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
 966				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
 967			else
 968				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
 969		}
 970	}
 971	else
 972		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
 973
 974	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
 975}
 976
 977static char *
 978bnx2_xceiver_str(struct bnx2 *bp)
 979{
 980	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
 981		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 982		 "Copper");
 983}
 984
 985static void
 986bnx2_report_link(struct bnx2 *bp)
 987{
 988	if (bp->link_up) {
 989		netif_carrier_on(bp->dev);
 990		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
 991			    bnx2_xceiver_str(bp),
 992			    bp->line_speed,
 993			    bp->duplex == DUPLEX_FULL ? "full" : "half");
 994
 995		if (bp->flow_ctrl) {
 996			if (bp->flow_ctrl & FLOW_CTRL_RX) {
 997				pr_cont(", receive ");
 998				if (bp->flow_ctrl & FLOW_CTRL_TX)
 999					pr_cont("& transmit ");
1000			}
1001			else {
1002				pr_cont(", transmit ");
1003			}
1004			pr_cont("flow control ON");
1005		}
1006		pr_cont("\n");
1007	} else {
1008		netif_carrier_off(bp->dev);
1009		netdev_err(bp->dev, "NIC %s Link is Down\n",
1010			   bnx2_xceiver_str(bp));
1011	}
1012
1013	bnx2_report_fw_link(bp);
1014}
1015
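/* Pause resolution below implements Table 28B-3 of 802.3ab-1999.  With
 * CAP = symmetric and ASYM = asymmetric pause advertisement bits, the
 * local outcome is:
 *
 *   local CAP          remote CAP          -> TX and RX pause
 *   local CAP+ASYM     remote ASYM only    -> RX pause only
 *   local ASYM only    remote CAP+ASYM     -> TX pause only
 *
 * 1000BASE-X advertisement bits are translated to the ADVERTISE_PAUSE_*
 * encoding first so a single resolver serves both PHY types.
 */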
1016static void
1017bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1018{
1019	u32 local_adv, remote_adv;
1020
1021	bp->flow_ctrl = 0;
1022	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1023		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1024
1025		if (bp->duplex == DUPLEX_FULL) {
1026			bp->flow_ctrl = bp->req_flow_ctrl;
1027		}
1028		return;
1029	}
1030
1031	if (bp->duplex != DUPLEX_FULL) {
1032		return;
1033	}
1034
1035	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1036	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1037		u32 val;
1038
1039		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1040		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1041			bp->flow_ctrl |= FLOW_CTRL_TX;
1042		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1043			bp->flow_ctrl |= FLOW_CTRL_RX;
1044		return;
1045	}
1046
1047	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1048	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1049
1050	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1051		u32 new_local_adv = 0;
1052		u32 new_remote_adv = 0;
1053
1054		if (local_adv & ADVERTISE_1000XPAUSE)
1055			new_local_adv |= ADVERTISE_PAUSE_CAP;
1056		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1057			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1058		if (remote_adv & ADVERTISE_1000XPAUSE)
1059			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1060		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1061			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1062
1063		local_adv = new_local_adv;
1064		remote_adv = new_remote_adv;
1065	}
1066
1067	/* See Table 28B-3 of 802.3ab-1999 spec. */
1068	if (local_adv & ADVERTISE_PAUSE_CAP) {
 1069		if (local_adv & ADVERTISE_PAUSE_ASYM) {
 1070			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1071				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1072			}
1073			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1074				bp->flow_ctrl = FLOW_CTRL_RX;
1075			}
1076		}
1077		else {
1078			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1079				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1080			}
1081		}
1082	}
1083	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1084		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1085			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1086
1087			bp->flow_ctrl = FLOW_CTRL_TX;
1088		}
1089	}
1090}
1091
1092static int
1093bnx2_5709s_linkup(struct bnx2 *bp)
1094{
1095	u32 val, speed;
1096
1097	bp->link_up = 1;
1098
1099	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1100	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1101	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1102
1103	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1104		bp->line_speed = bp->req_line_speed;
1105		bp->duplex = bp->req_duplex;
1106		return 0;
1107	}
1108	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1109	switch (speed) {
1110		case MII_BNX2_GP_TOP_AN_SPEED_10:
1111			bp->line_speed = SPEED_10;
1112			break;
1113		case MII_BNX2_GP_TOP_AN_SPEED_100:
1114			bp->line_speed = SPEED_100;
1115			break;
1116		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1117		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1118			bp->line_speed = SPEED_1000;
1119			break;
1120		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1121			bp->line_speed = SPEED_2500;
1122			break;
1123	}
1124	if (val & MII_BNX2_GP_TOP_AN_FD)
1125		bp->duplex = DUPLEX_FULL;
1126	else
1127		bp->duplex = DUPLEX_HALF;
1128	return 0;
1129}
1130
1131static int
1132bnx2_5708s_linkup(struct bnx2 *bp)
1133{
1134	u32 val;
1135
1136	bp->link_up = 1;
1137	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1138	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1139		case BCM5708S_1000X_STAT1_SPEED_10:
1140			bp->line_speed = SPEED_10;
1141			break;
1142		case BCM5708S_1000X_STAT1_SPEED_100:
1143			bp->line_speed = SPEED_100;
1144			break;
1145		case BCM5708S_1000X_STAT1_SPEED_1G:
1146			bp->line_speed = SPEED_1000;
1147			break;
1148		case BCM5708S_1000X_STAT1_SPEED_2G5:
1149			bp->line_speed = SPEED_2500;
1150			break;
1151	}
1152	if (val & BCM5708S_1000X_STAT1_FD)
1153		bp->duplex = DUPLEX_FULL;
1154	else
1155		bp->duplex = DUPLEX_HALF;
1156
1157	return 0;
1158}
1159
1160static int
1161bnx2_5706s_linkup(struct bnx2 *bp)
1162{
1163	u32 bmcr, local_adv, remote_adv, common;
1164
1165	bp->link_up = 1;
1166	bp->line_speed = SPEED_1000;
1167
1168	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1169	if (bmcr & BMCR_FULLDPLX) {
1170		bp->duplex = DUPLEX_FULL;
1171	}
1172	else {
1173		bp->duplex = DUPLEX_HALF;
1174	}
1175
1176	if (!(bmcr & BMCR_ANENABLE)) {
1177		return 0;
1178	}
1179
1180	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1181	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1182
1183	common = local_adv & remote_adv;
1184	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1185
1186		if (common & ADVERTISE_1000XFULL) {
1187			bp->duplex = DUPLEX_FULL;
1188		}
1189		else {
1190			bp->duplex = DUPLEX_HALF;
1191		}
1192	}
1193
1194	return 0;
1195}
1196
1197static int
1198bnx2_copper_linkup(struct bnx2 *bp)
1199{
1200	u32 bmcr;
1201
1202	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1203	if (bmcr & BMCR_ANENABLE) {
1204		u32 local_adv, remote_adv, common;
1205
1206		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1207		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1208
 1209		common = local_adv & (remote_adv >> 2); /* STAT1000 LP bits sit 2 bits above CTRL1000 */
1210		if (common & ADVERTISE_1000FULL) {
1211			bp->line_speed = SPEED_1000;
1212			bp->duplex = DUPLEX_FULL;
1213		}
1214		else if (common & ADVERTISE_1000HALF) {
1215			bp->line_speed = SPEED_1000;
1216			bp->duplex = DUPLEX_HALF;
1217		}
1218		else {
1219			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1220			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1221
1222			common = local_adv & remote_adv;
1223			if (common & ADVERTISE_100FULL) {
1224				bp->line_speed = SPEED_100;
1225				bp->duplex = DUPLEX_FULL;
1226			}
1227			else if (common & ADVERTISE_100HALF) {
1228				bp->line_speed = SPEED_100;
1229				bp->duplex = DUPLEX_HALF;
1230			}
1231			else if (common & ADVERTISE_10FULL) {
1232				bp->line_speed = SPEED_10;
1233				bp->duplex = DUPLEX_FULL;
1234			}
1235			else if (common & ADVERTISE_10HALF) {
1236				bp->line_speed = SPEED_10;
1237				bp->duplex = DUPLEX_HALF;
1238			}
1239			else {
1240				bp->line_speed = 0;
1241				bp->link_up = 0;
1242			}
1243		}
1244	}
1245	else {
1246		if (bmcr & BMCR_SPEED100) {
1247			bp->line_speed = SPEED_100;
1248		}
1249		else {
1250			bp->line_speed = SPEED_10;
1251		}
1252		if (bmcr & BMCR_FULLDPLX) {
1253			bp->duplex = DUPLEX_FULL;
1254		}
1255		else {
1256			bp->duplex = DUPLEX_HALF;
1257		}
1258	}
1259
1260	return 0;
1261}
1262
1263static void
1264bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1265{
1266	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1267
1268	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1269	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1270	val |= 0x02 << 8;
1271
1272	if (bp->flow_ctrl & FLOW_CTRL_TX)
1273		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1274
1275	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1276}
1277
1278static void
1279bnx2_init_all_rx_contexts(struct bnx2 *bp)
1280{
1281	int i;
1282	u32 cid;
1283
1284	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1285		if (i == 1)
1286			cid = RX_RSS_CID;
1287		bnx2_init_rx_context(bp, cid);
1288	}
1289}
1290
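/* bnx2_set_mac_link() programs the EMAC for the resolved link.  The
 * BNX2_EMAC_TX_LENGTHS values (0x2620, or 0x26ff for half-duplex
 * gigabit) are opaque IPG/slot-time settings inherited from the original
 * driver; port mode is chosen per speed and the RX/TX pause enables
 * mirror the flow_ctrl result.  RX contexts are re-initialized at the
 * end because the per-context flow-control bit depends on flow_ctrl.
 */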
1291static void
1292bnx2_set_mac_link(struct bnx2 *bp)
1293{
1294	u32 val;
1295
1296	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1297	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1298		(bp->duplex == DUPLEX_HALF)) {
1299		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1300	}
1301
1302	/* Configure the EMAC mode register. */
1303	val = REG_RD(bp, BNX2_EMAC_MODE);
1304
1305	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1306		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1307		BNX2_EMAC_MODE_25G_MODE);
1308
1309	if (bp->link_up) {
1310		switch (bp->line_speed) {
1311			case SPEED_10:
1312				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1313					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1314					break;
1315				}
1316				/* fall through */
1317			case SPEED_100:
1318				val |= BNX2_EMAC_MODE_PORT_MII;
1319				break;
1320			case SPEED_2500:
1321				val |= BNX2_EMAC_MODE_25G_MODE;
1322				/* fall through */
1323			case SPEED_1000:
1324				val |= BNX2_EMAC_MODE_PORT_GMII;
1325				break;
1326		}
1327	}
1328	else {
1329		val |= BNX2_EMAC_MODE_PORT_GMII;
1330	}
1331
1332	/* Set the MAC to operate in the appropriate duplex mode. */
1333	if (bp->duplex == DUPLEX_HALF)
1334		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1335	REG_WR(bp, BNX2_EMAC_MODE, val);
1336
1337	/* Enable/disable rx PAUSE. */
1338	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1339
1340	if (bp->flow_ctrl & FLOW_CTRL_RX)
1341		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1342	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1343
1344	/* Enable/disable tx PAUSE. */
1345	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1346	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1347
1348	if (bp->flow_ctrl & FLOW_CTRL_TX)
1349		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1350	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1351
1352	/* Acknowledge the interrupt. */
1353	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1354
1355	bnx2_init_all_rx_contexts(bp);
1356}
1357
1358static void
1359bnx2_enable_bmsr1(struct bnx2 *bp)
1360{
1361	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1362	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1363		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1364			       MII_BNX2_BLK_ADDR_GP_STATUS);
1365}
1366
1367static void
1368bnx2_disable_bmsr1(struct bnx2 *bp)
1369{
1370	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1371	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1372		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1373			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1374}
1375
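/* Return-value convention for the 2.5G advertisement helpers below:
 * bnx2_test_and_enable_2g5() returns nonzero when 2.5G was already
 * advertised (nothing changed) and 0 when it had to set the bit;
 * bnx2_test_and_disable_2g5() returns nonzero when it actually cleared
 * the bit.  Callers force the link down whenever the advertisement
 * changed so the peer renegotiates.
 */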
1376static int
1377bnx2_test_and_enable_2g5(struct bnx2 *bp)
1378{
1379	u32 up1;
1380	int ret = 1;
1381
1382	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1383		return 0;
1384
1385	if (bp->autoneg & AUTONEG_SPEED)
1386		bp->advertising |= ADVERTISED_2500baseX_Full;
1387
1388	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1389		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1390
1391	bnx2_read_phy(bp, bp->mii_up1, &up1);
1392	if (!(up1 & BCM5708S_UP1_2G5)) {
1393		up1 |= BCM5708S_UP1_2G5;
1394		bnx2_write_phy(bp, bp->mii_up1, up1);
1395		ret = 0;
1396	}
1397
1398	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1399		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1400			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1401
1402	return ret;
1403}
1404
1405static int
1406bnx2_test_and_disable_2g5(struct bnx2 *bp)
1407{
1408	u32 up1;
1409	int ret = 0;
1410
1411	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1412		return 0;
1413
1414	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1415		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1416
1417	bnx2_read_phy(bp, bp->mii_up1, &up1);
1418	if (up1 & BCM5708S_UP1_2G5) {
1419		up1 &= ~BCM5708S_UP1_2G5;
1420		bnx2_write_phy(bp, bp->mii_up1, up1);
1421		ret = 1;
1422	}
1423
1424	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1425		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1426			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1427
1428	return ret;
1429}
1430
1431static void
1432bnx2_enable_forced_2g5(struct bnx2 *bp)
1433{
1434	u32 uninitialized_var(bmcr);
1435	int err;
1436
1437	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1438		return;
1439
1440	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1441		u32 val;
1442
1443		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1444			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1445		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1446			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1447			val |= MII_BNX2_SD_MISC1_FORCE |
1448				MII_BNX2_SD_MISC1_FORCE_2_5G;
1449			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1450		}
1451
1452		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1453			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1454		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1455
1456	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1457		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1458		if (!err)
1459			bmcr |= BCM5708S_BMCR_FORCE_2500;
1460	} else {
1461		return;
1462	}
1463
1464	if (err)
1465		return;
1466
1467	if (bp->autoneg & AUTONEG_SPEED) {
1468		bmcr &= ~BMCR_ANENABLE;
1469		if (bp->req_duplex == DUPLEX_FULL)
1470			bmcr |= BMCR_FULLDPLX;
1471	}
1472	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1473}
1474
1475static void
1476bnx2_disable_forced_2g5(struct bnx2 *bp)
1477{
1478	u32 uninitialized_var(bmcr);
1479	int err;
1480
1481	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1482		return;
1483
1484	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1485		u32 val;
1486
1487		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1488			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1489		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1490			val &= ~MII_BNX2_SD_MISC1_FORCE;
1491			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1492		}
1493
1494		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1496		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1497
1498	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1499		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1500		if (!err)
1501			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1502	} else {
1503		return;
1504	}
1505
1506	if (err)
1507		return;
1508
1509	if (bp->autoneg & AUTONEG_SPEED)
1510		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1511	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1512}
1513
1514static void
1515bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1516{
1517	u32 val;
1518
1519	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1520	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1521	if (start)
1522		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1523	else
1524		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1525}
1526
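/* bnx2_set_link() reads BMSR twice on purpose: MII link status is
 * latched-low, so the first read returns the latched value and the
 * second the current state.  The 5706 SerDes additionally cross-checks
 * BNX2_EMAC_STATUS_LINK and the AN-debug shadow register, since BMSR
 * alone can report a link that has lost sync.
 */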
1527static int
1528bnx2_set_link(struct bnx2 *bp)
1529{
1530	u32 bmsr;
1531	u8 link_up;
1532
1533	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1534		bp->link_up = 1;
1535		return 0;
1536	}
1537
1538	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1539		return 0;
1540
1541	link_up = bp->link_up;
1542
1543	bnx2_enable_bmsr1(bp);
1544	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1545	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1546	bnx2_disable_bmsr1(bp);
1547
1548	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1549	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1550		u32 val, an_dbg;
1551
1552		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1553			bnx2_5706s_force_link_dn(bp, 0);
1554			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1555		}
1556		val = REG_RD(bp, BNX2_EMAC_STATUS);
1557
1558		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1559		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1560		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1561
1562		if ((val & BNX2_EMAC_STATUS_LINK) &&
1563		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1564			bmsr |= BMSR_LSTATUS;
1565		else
1566			bmsr &= ~BMSR_LSTATUS;
1567	}
1568
1569	if (bmsr & BMSR_LSTATUS) {
1570		bp->link_up = 1;
1571
1572		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1573			if (CHIP_NUM(bp) == CHIP_NUM_5706)
1574				bnx2_5706s_linkup(bp);
1575			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1576				bnx2_5708s_linkup(bp);
1577			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1578				bnx2_5709s_linkup(bp);
1579		}
1580		else {
1581			bnx2_copper_linkup(bp);
1582		}
1583		bnx2_resolve_flow_ctrl(bp);
1584	}
1585	else {
1586		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1587		    (bp->autoneg & AUTONEG_SPEED))
1588			bnx2_disable_forced_2g5(bp);
1589
1590		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1591			u32 bmcr;
1592
1593			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1594			bmcr |= BMCR_ANENABLE;
1595			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1596
1597			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1598		}
1599		bp->link_up = 0;
1600	}
1601
1602	if (bp->link_up != link_up) {
1603		bnx2_report_link(bp);
1604	}
1605
1606	bnx2_set_mac_link(bp);
1607
1608	return 0;
1609}
1610
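/* BMCR_RESET is self-clearing; the loop below polls it every 10 us and
 * gives up after PHY_RESET_MAX_WAIT (100) iterations, i.e. roughly 1 ms.
 */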
1611static int
1612bnx2_reset_phy(struct bnx2 *bp)
1613{
1614	int i;
1615	u32 reg;
1616
 1617	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1618
1619#define PHY_RESET_MAX_WAIT 100
1620	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1621		udelay(10);
1622
1623		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1624		if (!(reg & BMCR_RESET)) {
1625			udelay(20);
1626			break;
1627		}
1628	}
1629	if (i == PHY_RESET_MAX_WAIT) {
1630		return -EBUSY;
1631	}
1632	return 0;
1633}
1634
1635static u32
1636bnx2_phy_get_pause_adv(struct bnx2 *bp)
1637{
1638	u32 adv = 0;
1639
1640	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1641		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1642
1643		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1644			adv = ADVERTISE_1000XPAUSE;
1645		}
1646		else {
1647			adv = ADVERTISE_PAUSE_CAP;
1648		}
1649	}
1650	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1651		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1652			adv = ADVERTISE_1000XPSE_ASYM;
1653		}
1654		else {
1655			adv = ADVERTISE_PAUSE_ASYM;
1656		}
1657	}
1658	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1659		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1660			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1661		}
1662		else {
1663			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1664		}
1665	}
1666	return adv;
1667}
1668
1669static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1670
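/* With a firmware-managed ("remote") PHY the driver never touches MDIO
 * directly: bnx2_setup_remote_phy() encodes the speed/duplex/pause
 * request as BNX2_NETLINK_SET_LINK_* bits, posts it in the
 * BNX2_DRV_MB_ARG0 mailbox and lets the firmware apply it through
 * bnx2_fw_sync(), dropping phy_lock around the call since it may sleep.
 */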
1671static int
1672bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1673__releases(&bp->phy_lock)
1674__acquires(&bp->phy_lock)
1675{
1676	u32 speed_arg = 0, pause_adv;
1677
1678	pause_adv = bnx2_phy_get_pause_adv(bp);
1679
1680	if (bp->autoneg & AUTONEG_SPEED) {
1681		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1682		if (bp->advertising & ADVERTISED_10baseT_Half)
1683			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1684		if (bp->advertising & ADVERTISED_10baseT_Full)
1685			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1686		if (bp->advertising & ADVERTISED_100baseT_Half)
1687			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1688		if (bp->advertising & ADVERTISED_100baseT_Full)
1689			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1690		if (bp->advertising & ADVERTISED_1000baseT_Full)
1691			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1692		if (bp->advertising & ADVERTISED_2500baseX_Full)
1693			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1694	} else {
1695		if (bp->req_line_speed == SPEED_2500)
1696			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1697		else if (bp->req_line_speed == SPEED_1000)
1698			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1699		else if (bp->req_line_speed == SPEED_100) {
1700			if (bp->req_duplex == DUPLEX_FULL)
1701				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1702			else
1703				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1704		} else if (bp->req_line_speed == SPEED_10) {
1705			if (bp->req_duplex == DUPLEX_FULL)
1706				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1707			else
1708				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1709		}
1710	}
1711
1712	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1713		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1714	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1715		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1716
1717	if (port == PORT_TP)
1718		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1719			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1720
1721	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1722
1723	spin_unlock_bh(&bp->phy_lock);
1724	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1725	spin_lock_bh(&bp->phy_lock);
1726
1727	return 0;
1728}
1729
1730static int
1731bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1732__releases(&bp->phy_lock)
1733__acquires(&bp->phy_lock)
1734{
1735	u32 adv, bmcr;
1736	u32 new_adv = 0;
1737
1738	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1739		return bnx2_setup_remote_phy(bp, port);
1740
1741	if (!(bp->autoneg & AUTONEG_SPEED)) {
1742		u32 new_bmcr;
1743		int force_link_down = 0;
1744
1745		if (bp->req_line_speed == SPEED_2500) {
1746			if (!bnx2_test_and_enable_2g5(bp))
1747				force_link_down = 1;
1748		} else if (bp->req_line_speed == SPEED_1000) {
1749			if (bnx2_test_and_disable_2g5(bp))
1750				force_link_down = 1;
1751		}
1752		bnx2_read_phy(bp, bp->mii_adv, &adv);
1753		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1754
1755		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1756		new_bmcr = bmcr & ~BMCR_ANENABLE;
1757		new_bmcr |= BMCR_SPEED1000;
1758
1759		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1760			if (bp->req_line_speed == SPEED_2500)
1761				bnx2_enable_forced_2g5(bp);
1762			else if (bp->req_line_speed == SPEED_1000) {
1763				bnx2_disable_forced_2g5(bp);
1764				new_bmcr &= ~0x2000;
1765			}
1766
1767		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1768			if (bp->req_line_speed == SPEED_2500)
1769				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1770			else
1771				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1772		}
1773
1774		if (bp->req_duplex == DUPLEX_FULL) {
1775			adv |= ADVERTISE_1000XFULL;
1776			new_bmcr |= BMCR_FULLDPLX;
1777		}
1778		else {
1779			adv |= ADVERTISE_1000XHALF;
1780			new_bmcr &= ~BMCR_FULLDPLX;
1781		}
1782		if ((new_bmcr != bmcr) || (force_link_down)) {
1783			/* Force a link down visible on the other side */
1784			if (bp->link_up) {
1785				bnx2_write_phy(bp, bp->mii_adv, adv &
1786					       ~(ADVERTISE_1000XFULL |
1787						 ADVERTISE_1000XHALF));
1788				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1789					BMCR_ANRESTART | BMCR_ANENABLE);
1790
1791				bp->link_up = 0;
1792				netif_carrier_off(bp->dev);
1793				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1794				bnx2_report_link(bp);
1795			}
1796			bnx2_write_phy(bp, bp->mii_adv, adv);
1797			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1798		} else {
1799			bnx2_resolve_flow_ctrl(bp);
1800			bnx2_set_mac_link(bp);
1801		}
1802		return 0;
1803	}
1804
1805	bnx2_test_and_enable_2g5(bp);
1806
1807	if (bp->advertising & ADVERTISED_1000baseT_Full)
1808		new_adv |= ADVERTISE_1000XFULL;
1809
1810	new_adv |= bnx2_phy_get_pause_adv(bp);
1811
1812	bnx2_read_phy(bp, bp->mii_adv, &adv);
1813	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1814
1815	bp->serdes_an_pending = 0;
1816	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1817		/* Force a link down visible on the other side */
1818		if (bp->link_up) {
1819			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1820			spin_unlock_bh(&bp->phy_lock);
1821			msleep(20);
1822			spin_lock_bh(&bp->phy_lock);
1823		}
1824
1825		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1826		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1827			BMCR_ANENABLE);
1828		/* Speed up link-up time when the link partner
 1829		 * does not autonegotiate, which is very common
 1830		 * in blade servers. Some blade servers use
 1831		 * IPMI for keyboard input and it's important
1832		 * to minimize link disruptions. Autoneg. involves
1833		 * exchanging base pages plus 3 next pages and
1834		 * normally completes in about 120 msec.
1835		 */
1836		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1837		bp->serdes_an_pending = 1;
1838		mod_timer(&bp->timer, jiffies + bp->current_interval);
1839	} else {
1840		bnx2_resolve_flow_ctrl(bp);
1841		bnx2_set_mac_link(bp);
1842	}
1843
1844	return 0;
1845}
1846
1847#define ETHTOOL_ALL_FIBRE_SPEED						\
1848	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1849		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1850		(ADVERTISED_1000baseT_Full)
1851
1852#define ETHTOOL_ALL_COPPER_SPEED					\
1853	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1854	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1855	ADVERTISED_1000baseT_Full)
1856
1857#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1858	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1859
1860#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1861
1862static void
1863bnx2_set_default_remote_link(struct bnx2 *bp)
1864{
1865	u32 link;
1866
1867	if (bp->phy_port == PORT_TP)
1868		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1869	else
1870		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1871
1872	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1873		bp->req_line_speed = 0;
1874		bp->autoneg |= AUTONEG_SPEED;
1875		bp->advertising = ADVERTISED_Autoneg;
1876		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1877			bp->advertising |= ADVERTISED_10baseT_Half;
1878		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1879			bp->advertising |= ADVERTISED_10baseT_Full;
1880		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1881			bp->advertising |= ADVERTISED_100baseT_Half;
1882		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1883			bp->advertising |= ADVERTISED_100baseT_Full;
1884		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1885			bp->advertising |= ADVERTISED_1000baseT_Full;
1886		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1887			bp->advertising |= ADVERTISED_2500baseX_Full;
1888	} else {
1889		bp->autoneg = 0;
1890		bp->advertising = 0;
1891		bp->req_duplex = DUPLEX_FULL;
1892		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1893			bp->req_line_speed = SPEED_10;
1894			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1895				bp->req_duplex = DUPLEX_HALF;
1896		}
1897		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1898			bp->req_line_speed = SPEED_100;
1899			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1900				bp->req_duplex = DUPLEX_HALF;
1901		}
1902		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1903			bp->req_line_speed = SPEED_1000;
1904		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1905			bp->req_line_speed = SPEED_2500;
1906	}
1907}
1908
1909static void
1910bnx2_set_default_link(struct bnx2 *bp)
1911{
1912	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1913		bnx2_set_default_remote_link(bp);
1914		return;
1915	}
1916
1917	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1918	bp->req_line_speed = 0;
1919	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1920		u32 reg;
1921
1922		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1923
1924		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1925		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1926		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1927			bp->autoneg = 0;
1928			bp->req_line_speed = bp->line_speed = SPEED_1000;
1929			bp->req_duplex = DUPLEX_FULL;
1930		}
1931	} else
1932		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1933}
1934
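/* Driver pulse: an incrementing sequence number written to the
 * BNX2_DRV_PULSE_MB mailbox tells the bootcode the driver is alive.
 * BNX2_LINK_STATUS_HEART_BEAT_EXPIRED in a link event (handled in
 * bnx2_remote_phy_event() below) means the firmware missed a pulse and
 * wants one immediately.
 */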
1935static void
1936bnx2_send_heart_beat(struct bnx2 *bp)
1937{
1938	u32 msg;
1939	u32 addr;
1940
1941	spin_lock(&bp->indirect_lock);
1942	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1943	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1944	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1945	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1946	spin_unlock(&bp->indirect_lock);
1947}
1948
1949static void
1950bnx2_remote_phy_event(struct bnx2 *bp)
1951{
1952	u32 msg;
1953	u8 link_up = bp->link_up;
1954	u8 old_port;
1955
1956	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1957
1958	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1959		bnx2_send_heart_beat(bp);
1960
1961	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1962
1963	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1964		bp->link_up = 0;
1965	else {
1966		u32 speed;
1967
1968		bp->link_up = 1;
1969		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1970		bp->duplex = DUPLEX_FULL;
1971		switch (speed) {
1972			case BNX2_LINK_STATUS_10HALF:
1973				bp->duplex = DUPLEX_HALF;	/* fall through */
1974			case BNX2_LINK_STATUS_10FULL:
1975				bp->line_speed = SPEED_10;
1976				break;
1977			case BNX2_LINK_STATUS_100HALF:
1978				bp->duplex = DUPLEX_HALF;	/* fall through */
1979			case BNX2_LINK_STATUS_100BASE_T4:
1980			case BNX2_LINK_STATUS_100FULL:
1981				bp->line_speed = SPEED_100;
1982				break;
1983			case BNX2_LINK_STATUS_1000HALF:
1984				bp->duplex = DUPLEX_HALF;	/* fall through */
1985			case BNX2_LINK_STATUS_1000FULL:
1986				bp->line_speed = SPEED_1000;
1987				break;
1988			case BNX2_LINK_STATUS_2500HALF:
1989				bp->duplex = DUPLEX_HALF;	/* fall through */
1990			case BNX2_LINK_STATUS_2500FULL:
1991				bp->line_speed = SPEED_2500;
1992				break;
1993			default:
1994				bp->line_speed = 0;
1995				break;
1996		}
1997
1998		bp->flow_ctrl = 0;
1999		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2000		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2001			if (bp->duplex == DUPLEX_FULL)
2002				bp->flow_ctrl = bp->req_flow_ctrl;
2003		} else {
2004			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2005				bp->flow_ctrl |= FLOW_CTRL_TX;
2006			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2007				bp->flow_ctrl |= FLOW_CTRL_RX;
2008		}
2009
2010		old_port = bp->phy_port;
2011		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2012			bp->phy_port = PORT_FIBRE;
2013		else
2014			bp->phy_port = PORT_TP;
2015
2016		if (old_port != bp->phy_port)
2017			bnx2_set_default_link(bp);
2018
2019	}
2020	if (bp->link_up != link_up)
2021		bnx2_report_link(bp);
2022
2023	bnx2_set_mac_link(bp);
2024}
2025
2026static int
2027bnx2_set_remote_link(struct bnx2 *bp)
2028{
2029	u32 evt_code;
2030
2031	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2032	switch (evt_code) {
2033		case BNX2_FW_EVT_CODE_LINK_EVENT:
2034			bnx2_remote_phy_event(bp);
2035			break;
2036		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2037		default:
2038			bnx2_send_heart_beat(bp);
2039			break;
2040	}
2041	return 0;
2042}
2043
2044static int
2045bnx2_setup_copper_phy(struct bnx2 *bp)
2046__releases(&bp->phy_lock)
2047__acquires(&bp->phy_lock)
2048{
2049	u32 bmcr;
2050	u32 new_bmcr;
2051
2052	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2053
2054	if (bp->autoneg & AUTONEG_SPEED) {
2055		u32 adv_reg, adv1000_reg;
2056		u32 new_adv_reg = 0;
2057		u32 new_adv1000_reg = 0;
2058
2059		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2060		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2061			ADVERTISE_PAUSE_ASYM);
2062
2063		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2064		adv1000_reg &= PHY_ALL_1000_SPEED;
2065
2066		if (bp->advertising & ADVERTISED_10baseT_Half)
2067			new_adv_reg |= ADVERTISE_10HALF;
2068		if (bp->advertising & ADVERTISED_10baseT_Full)
2069			new_adv_reg |= ADVERTISE_10FULL;
2070		if (bp->advertising & ADVERTISED_100baseT_Half)
2071			new_adv_reg |= ADVERTISE_100HALF;
2072		if (bp->advertising & ADVERTISED_100baseT_Full)
2073			new_adv_reg |= ADVERTISE_100FULL;
2074		if (bp->advertising & ADVERTISED_1000baseT_Full)
2075			new_adv1000_reg |= ADVERTISE_1000FULL;
2076
2077		new_adv_reg |= ADVERTISE_CSMA;
2078
2079		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2080
2081		if ((adv1000_reg != new_adv1000_reg) ||
2082			(adv_reg != new_adv_reg) ||
2083			((bmcr & BMCR_ANENABLE) == 0)) {
2084
2085			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2086			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2087			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2088				BMCR_ANENABLE);
2089		}
2090		else if (bp->link_up) {
2091			/* Flow ctrl may have changed from auto to forced
2092			 * or vice-versa. */
2093
2094			bnx2_resolve_flow_ctrl(bp);
2095			bnx2_set_mac_link(bp);
2096		}
2097		return 0;
2098	}
2099
2100	new_bmcr = 0;
2101	if (bp->req_line_speed == SPEED_100) {
2102		new_bmcr |= BMCR_SPEED100;
2103	}
2104	if (bp->req_duplex == DUPLEX_FULL) {
2105		new_bmcr |= BMCR_FULLDPLX;
2106	}
2107	if (new_bmcr != bmcr) {
2108		u32 bmsr;
2109
2110		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2111		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);	/* read twice: BMSR latches link-down */
2112
2113		if (bmsr & BMSR_LSTATUS) {
2114			/* Force link down */
2115			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2116			spin_unlock_bh(&bp->phy_lock);
2117			msleep(50);
2118			spin_lock_bh(&bp->phy_lock);
2119
2120			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2121			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2122		}
2123
2124		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2125
2126		/* Normally, the new speed is set up after the link has
2127		 * gone down and up again. In some cases, the link will not
2128		 * go down, so we need to set up the new speed here.
2129		 */
2130		if (bmsr & BMSR_LSTATUS) {
2131			bp->line_speed = bp->req_line_speed;
2132			bp->duplex = bp->req_duplex;
2133			bnx2_resolve_flow_ctrl(bp);
2134			bnx2_set_mac_link(bp);
2135		}
2136	} else {
2137		bnx2_resolve_flow_ctrl(bp);
2138		bnx2_set_mac_link(bp);
2139	}
2140	return 0;
2141}
2142
2143static int
2144bnx2_setup_phy(struct bnx2 *bp, u8 port)
2145__releases(&bp->phy_lock)
2146__acquires(&bp->phy_lock)
2147{
2148	if (bp->loopback == MAC_LOOPBACK)
2149		return 0;
2150
2151	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2152		return bnx2_setup_serdes_phy(bp, port);
2153	}
2154	else {
2155		return bnx2_setup_copper_phy(bp);
2156	}
2157}
2158
2159static int
2160bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2161{
2162	u32 val;
2163
2164	bp->mii_bmcr = MII_BMCR + 0x10;
2165	bp->mii_bmsr = MII_BMSR + 0x10;
2166	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2167	bp->mii_adv = MII_ADVERTISE + 0x10;
2168	bp->mii_lpa = MII_LPA + 0x10;
2169	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2170
2171	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2172	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2173
2174	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2175	if (reset_phy)
2176		bnx2_reset_phy(bp);
2177
2178	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2179
2180	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2181	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2182	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2183	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2184
2185	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2186	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2187	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2188		val |= BCM5708S_UP1_2G5;
2189	else
2190		val &= ~BCM5708S_UP1_2G5;
2191	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2192
2193	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2194	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2195	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2196	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2197
2198	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2199
2200	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2201	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2202	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2203
2204	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2205
2206	return 0;
2207}
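
/* Illustration only (not part of the driver): the 5709S PHY registers are
 * banked, selected by writing MII_BNX2_BLK_ADDR first, as done above
 * before each group of accesses.  A hypothetical read-modify-write helper
 * for one banked register:
 */
static void
bnx2_5709s_phy_rmw_sketch(struct bnx2 *bp, u32 blk, u32 reg, u32 clear,
			  u32 set)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, blk);
	bnx2_read_phy(bp, reg, &val);
	val = (val & ~clear) | set;
	bnx2_write_phy(bp, reg, val);
}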
2208
2209static int
2210bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2211{
2212	u32 val;
2213
2214	if (reset_phy)
2215		bnx2_reset_phy(bp);
2216
2217	bp->mii_up1 = BCM5708S_UP1;
2218
2219	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2220	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2221	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2222
2223	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2224	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2225	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2226
2227	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2228	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2229	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2230
2231	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2232		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2233		val |= BCM5708S_UP1_2G5;
2234		bnx2_write_phy(bp, BCM5708S_UP1, val);
2235	}
2236
2237	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2238	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2239	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2240		/* increase tx signal amplitude */
2241		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2242			       BCM5708S_BLK_ADDR_TX_MISC);
2243		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2244		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2245		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2246		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2247	}
2248
2249	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2250	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2251
2252	if (val) {
2253		u32 is_backplane;
2254
2255		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2256		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2257			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2258				       BCM5708S_BLK_ADDR_TX_MISC);
2259			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2260			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2261				       BCM5708S_BLK_ADDR_DIG);
2262		}
2263	}
2264	return 0;
2265}
2266
2267static int
2268bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2269{
2270	if (reset_phy)
2271		bnx2_reset_phy(bp);
2272
2273	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2274
2275	if (CHIP_NUM(bp) == CHIP_NUM_5706)
2276		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2277
2278	if (bp->dev->mtu > 1500) {
2279		u32 val;
2280
2281		/* Set extended packet length bit */
2282		bnx2_write_phy(bp, 0x18, 0x7);
2283		bnx2_read_phy(bp, 0x18, &val);
2284		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2285
2286		bnx2_write_phy(bp, 0x1c, 0x6c00);
2287		bnx2_read_phy(bp, 0x1c, &val);
2288		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2289	}
2290	else {
2291		u32 val;
2292
2293		bnx2_write_phy(bp, 0x18, 0x7);
2294		bnx2_read_phy(bp, 0x18, &val);
2295		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2296
2297		bnx2_write_phy(bp, 0x1c, 0x6c00);
2298		bnx2_read_phy(bp, 0x1c, &val);
2299		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2300	}
2301
2302	return 0;
2303}
2304
2305static int
2306bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2307{
2308	u32 val;
2309
2310	if (reset_phy)
2311		bnx2_reset_phy(bp);
2312
2313	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2314		bnx2_write_phy(bp, 0x18, 0x0c00);
2315		bnx2_write_phy(bp, 0x17, 0x000a);
2316		bnx2_write_phy(bp, 0x15, 0x310b);
2317		bnx2_write_phy(bp, 0x17, 0x201f);
2318		bnx2_write_phy(bp, 0x15, 0x9506);
2319		bnx2_write_phy(bp, 0x17, 0x401f);
2320		bnx2_write_phy(bp, 0x15, 0x14e2);
2321		bnx2_write_phy(bp, 0x18, 0x0400);
2322	}
2323
2324	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2325		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2326			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2327		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2328		val &= ~(1 << 8);
2329		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2330	}
2331
2332	if (bp->dev->mtu > 1500) {
2333		/* Set extended packet length bit */
2334		bnx2_write_phy(bp, 0x18, 0x7);
2335		bnx2_read_phy(bp, 0x18, &val);
2336		bnx2_write_phy(bp, 0x18, val | 0x4000);
2337
2338		bnx2_read_phy(bp, 0x10, &val);
2339		bnx2_write_phy(bp, 0x10, val | 0x1);
2340	}
2341	else {
2342		bnx2_write_phy(bp, 0x18, 0x7);
2343		bnx2_read_phy(bp, 0x18, &val);
2344		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2345
2346		bnx2_read_phy(bp, 0x10, &val);
2347		bnx2_write_phy(bp, 0x10, val & ~0x1);
2348	}
2349
2350	/* ethernet@wirespeed */
2351	bnx2_write_phy(bp, 0x18, 0x7007);
2352	bnx2_read_phy(bp, 0x18, &val);
2353	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2354	return 0;
2355}
2356
2357
2358static int
2359bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2360__releases(&bp->phy_lock)
2361__acquires(&bp->phy_lock)
2362{
2363	u32 val;
2364	int rc = 0;
2365
2366	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2367	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2368
2369	bp->mii_bmcr = MII_BMCR;
2370	bp->mii_bmsr = MII_BMSR;
2371	bp->mii_bmsr1 = MII_BMSR;
2372	bp->mii_adv = MII_ADVERTISE;
2373	bp->mii_lpa = MII_LPA;
2374
2375	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2376
2377	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2378		goto setup_phy;
2379
2380	bnx2_read_phy(bp, MII_PHYSID1, &val);
2381	bp->phy_id = val << 16;
2382	bnx2_read_phy(bp, MII_PHYSID2, &val);
2383	bp->phy_id |= val & 0xffff;
2384
2385	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2386		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2387			rc = bnx2_init_5706s_phy(bp, reset_phy);
2388		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2389			rc = bnx2_init_5708s_phy(bp, reset_phy);
2390		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2391			rc = bnx2_init_5709s_phy(bp, reset_phy);
2392	}
2393	else {
2394		rc = bnx2_init_copper_phy(bp, reset_phy);
2395	}
2396
2397setup_phy:
2398	if (!rc)
2399		rc = bnx2_setup_phy(bp, bp->phy_port);
2400
2401	return rc;
2402}
2403
2404static int
2405bnx2_set_mac_loopback(struct bnx2 *bp)
2406{
2407	u32 mac_mode;
2408
2409	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2410	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2411	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2412	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2413	bp->link_up = 1;
2414	return 0;
2415}
2416
2417static int bnx2_test_link(struct bnx2 *);
2418
2419static int
2420bnx2_set_phy_loopback(struct bnx2 *bp)
2421{
2422	u32 mac_mode;
2423	int rc, i;
2424
2425	spin_lock_bh(&bp->phy_lock);
2426	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2427			    BMCR_SPEED1000);
2428	spin_unlock_bh(&bp->phy_lock);
2429	if (rc)
2430		return rc;
2431
2432	for (i = 0; i < 10; i++) {
2433		if (bnx2_test_link(bp) == 0)
2434			break;
2435		msleep(100);
2436	}
2437
2438	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2439	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2440		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2441		      BNX2_EMAC_MODE_25G_MODE);
2442
2443	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2444	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2445	bp->link_up = 1;
2446	return 0;
2447}
2448
2449static void
2450bnx2_dump_mcp_state(struct bnx2 *bp)
2451{
2452	struct net_device *dev = bp->dev;
2453	u32 mcp_p0, mcp_p1;
2454
2455	netdev_err(dev, "<--- start MCP states dump --->\n");
2456	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2457		mcp_p0 = BNX2_MCP_STATE_P0;
2458		mcp_p1 = BNX2_MCP_STATE_P1;
2459	} else {
2460		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2461		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2462	}
2463	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2464		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2465	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2466		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2467		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2468		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2469	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n", /* PC read twice to see if it is advancing */
2470		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2471		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2472		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2473	netdev_err(dev, "DEBUG: shmem states:\n");
2474	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2475		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2476		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2477		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2478	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2479	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2480		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2481		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2482	pr_cont(" condition[%08x]\n",
2483		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2484	DP_SHMEM_LINE(bp, 0x3cc);
2485	DP_SHMEM_LINE(bp, 0x3dc);
2486	DP_SHMEM_LINE(bp, 0x3ec);
2487	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2488	netdev_err(dev, "<--- end MCP states dump --->\n");
2489}
2490
2491static int
2492bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2493{
2494	int i;
2495	u32 val;
2496
2497	bp->fw_wr_seq++;
2498	msg_data |= bp->fw_wr_seq;
2499
2500	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2501
2502	if (!ack)
2503		return 0;
2504
2505	/* wait for an acknowledgement. */
2506	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2507		msleep(10);
2508
2509		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2510
2511		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2512			break;
2513	}
2514	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2515		return 0;
2516
2517	/* If we timed out, inform the firmware that this is the case. */
2518	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2519		msg_data &= ~BNX2_DRV_MSG_CODE;
2520		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2521
2522		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2523		if (!silent) {
2524			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2525			bnx2_dump_mcp_state(bp);
2526		}
2527
2528		return -EBUSY;
2529	}
2530
2531	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2532		return -EIO;
2533
2534	return 0;
2535}
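
/* Annotation (not part of the driver): bnx2_fw_sync() above is a
 * sequence-numbered mailbox.  The driver merges ++fw_wr_seq into the
 * request written to BNX2_DRV_MB; the firmware echoes those sequence bits
 * in the ACK field of BNX2_FW_MB, which is what pairs an ack with its
 * request.  A hypothetical caller (the message chosen is illustrative):
 */
static int
bnx2_fw_sync_sketch(struct bnx2 *bp)
{
	return bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 |
			    BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL, 1, 0);
}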
2536
2537static int
2538bnx2_init_5709_context(struct bnx2 *bp)
2539{
2540	int i, ret = 0;
2541	u32 val;
2542
2543	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2544	val |= (BCM_PAGE_BITS - 8) << 16;
2545	REG_WR(bp, BNX2_CTX_COMMAND, val);
2546	for (i = 0; i < 10; i++) {
2547		val = REG_RD(bp, BNX2_CTX_COMMAND);
2548		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2549			break;
2550		udelay(2);
2551	}
2552	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2553		return -EBUSY;
2554
2555	for (i = 0; i < bp->ctx_pages; i++) {
2556		int j;
2557
2558		if (bp->ctx_blk[i])
2559			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2560		else
2561			return -ENOMEM;
2562
2563		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2564		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2565		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2566		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2567		       (u64) bp->ctx_blk_mapping[i] >> 32);
2568		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2569		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2570		for (j = 0; j < 10; j++) {
2571
2572			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2573			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2574				break;
2575			udelay(5);
2576		}
2577		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2578			ret = -EBUSY;
2579			break;
2580		}
2581	}
2582	return ret;
2583}
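
/* Illustration only (not part of the driver): the poll-until-clear idiom
 * used twice in bnx2_init_5709_context() above, factored into a
 * hypothetical helper; 'tries' and 'usec' are the caller's polling budget.
 */
static int
bnx2_poll_reg_clear_sketch(struct bnx2 *bp, u32 reg, u32 bits, int tries,
			   int usec)
{
	u32 val;

	do {
		val = REG_RD(bp, reg);
		if (!(val & bits))
			return 0;
		udelay(usec);
	} while (--tries);
	return -EBUSY;
}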
2584
2585static void
2586bnx2_init_context(struct bnx2 *bp)
2587{
2588	u32 vcid;
2589
2590	vcid = 96;
2591	while (vcid) {
2592		u32 vcid_addr, pcid_addr, offset;
2593		int i;
2594
2595		vcid--;
2596
2597		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2598			u32 new_vcid;
2599
2600			vcid_addr = GET_PCID_ADDR(vcid);
2601			if (vcid & 0x8) {
2602				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2603			}
2604			else {
2605				new_vcid = vcid;
2606			}
2607			pcid_addr = GET_PCID_ADDR(new_vcid);
2608		}
2609		else {
2610			vcid_addr = GET_CID_ADDR(vcid);
2611			pcid_addr = vcid_addr;
2612		}
2613
2614		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2615			vcid_addr += (i << PHY_CTX_SHIFT);
2616			pcid_addr += (i << PHY_CTX_SHIFT);
2617
2618			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2619			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2620
2621			/* Zero out the context. */
2622			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2623				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2624		}
2625	}
2626}
2627
2628static int
2629bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2630{
2631	u16 *good_mbuf;
2632	u32 good_mbuf_cnt;
2633	u32 val;
2634
2635	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2636	if (good_mbuf == NULL) {
2637		pr_err("Failed to allocate memory in %s\n", __func__);
2638		return -ENOMEM;
2639	}
2640
2641	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2642		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2643
2644	good_mbuf_cnt = 0;
2645
2646	/* Allocate a bunch of mbufs and save the good ones in an array. */
2647	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2648	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2649		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2650				BNX2_RBUF_COMMAND_ALLOC_REQ);
2651
2652		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2653
2654		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2655
2656		/* The addresses with Bit 9 set are bad memory blocks. */
2657		if (!(val & (1 << 9))) {
2658			good_mbuf[good_mbuf_cnt] = (u16) val;
2659			good_mbuf_cnt++;
2660		}
2661
2662		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2663	}
2664
2665	/* Free the good ones back to the mbuf pool thus discarding
2666	 * all the bad ones. */
2667	while (good_mbuf_cnt) {
2668		good_mbuf_cnt--;
2669
2670		val = good_mbuf[good_mbuf_cnt];
2671		val = (val << 9) | val | 1;	/* buffer id in both halves, low bit set */
2672
2673		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2674	}
2675	kfree(good_mbuf);
2676	return 0;
2677}
2678
2679static void
2680bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2681{
2682	u32 val;
2683
2684	val = (mac_addr[0] << 8) | mac_addr[1];
2685
2686	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2687
2688	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2689		(mac_addr[4] << 8) | mac_addr[5];
2690
2691	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2692}
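
/* Annotation: 'pos' selects one of the EMAC perfect-match entries, eight
 * bytes of register space apart; the address is programmed big-endian,
 * two bytes in MATCH0 and four in MATCH1.  The primary station address
 * goes into entry 0, e.g. bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0) as
 * done in bnx2_set_power_state() below.
 */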
2693
2694static inline int
2695bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2696{
2697	dma_addr_t mapping;
2698	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2699	struct rx_bd *rxbd =
2700		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2701	struct page *page = alloc_page(gfp);
2702
2703	if (!page)
2704		return -ENOMEM;
2705	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2706			       PCI_DMA_FROMDEVICE);
2707	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2708		__free_page(page);
2709		return -EIO;
2710	}
2711
2712	rx_pg->page = page;
2713	dma_unmap_addr_set(rx_pg, mapping, mapping);
2714	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2715	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2716	return 0;
2717}
2718
2719static void
2720bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2721{
2722	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2723	struct page *page = rx_pg->page;
2724
2725	if (!page)
2726		return;
2727
2728	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2729		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2730
2731	__free_page(page);
2732	rx_pg->page = NULL;
2733}
2734
2735static inline int
2736bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2737{
2738	struct sk_buff *skb;
2739	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2740	dma_addr_t mapping;
2741	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2742	unsigned long align;
2743
2744	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2745	if (skb == NULL) {
2746		return -ENOMEM;
2747	}
2748
2749	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2750		skb_reserve(skb, BNX2_RX_ALIGN - align);
2751
2752	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2753				 PCI_DMA_FROMDEVICE);
2754	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2755		dev_kfree_skb(skb);
2756		return -EIO;
2757	}
2758
2759	rx_buf->skb = skb;
2760	rx_buf->desc = (struct l2_fhdr *) skb->data;
2761	dma_unmap_addr_set(rx_buf, mapping, mapping);
2762
2763	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2764	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2765
2766	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2767
2768	return 0;
2769}
2770
2771static int
2772bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2773{
2774	struct status_block *sblk = bnapi->status_blk.msi;
2775	u32 new_link_state, old_link_state;
2776	int is_set = 1;
2777
2778	new_link_state = sblk->status_attn_bits & event;
2779	old_link_state = sblk->status_attn_bits_ack & event;
2780	if (new_link_state != old_link_state) {
2781		if (new_link_state)
2782			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2783		else
2784			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2785	} else
2786		is_set = 0;
2787
2788	return is_set;
2789}
2790
2791static void
2792bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2793{
2794	spin_lock(&bp->phy_lock);
2795
2796	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2797		bnx2_set_link(bp);
2798	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2799		bnx2_set_remote_link(bp);
2800
2801	spin_unlock(&bp->phy_lock);
2802
2803}
2804
2805static inline u16
2806bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2807{
2808	u16 cons;
2809
2810	/* Tell compiler that status block fields can change. */
2811	barrier();
2812	cons = *bnapi->hw_tx_cons_ptr;
2813	barrier();
2814	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2815		cons++;
2816	return cons;
2817}
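
/* Annotation: the '== MAX_TX_DESC_CNT' test above exists because the last
 * slot of each descriptor page is a chain entry pointing at the next page,
 * not a real buffer descriptor, so an index that lands on it is stepped
 * past.  NEXT_TX_BD()/NEXT_RX_BD() apply the same skip when software
 * advances its own indices.
 */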
2818
2819static int
2820bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2821{
2822	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2823	u16 hw_cons, sw_cons, sw_ring_cons;
2824	int tx_pkt = 0, index;
2825	struct netdev_queue *txq;
2826
2827	index = (bnapi - bp->bnx2_napi);
2828	txq = netdev_get_tx_queue(bp->dev, index);
2829
2830	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2831	sw_cons = txr->tx_cons;
2832
2833	while (sw_cons != hw_cons) {
2834		struct sw_tx_bd *tx_buf;
2835		struct sk_buff *skb;
2836		int i, last;
2837
2838		sw_ring_cons = TX_RING_IDX(sw_cons);
2839
2840		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2841		skb = tx_buf->skb;
2842
2843		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2844		prefetch(&skb->end);
2845
2846		/* partial BD completions possible with TSO packets */
2847		if (tx_buf->is_gso) {
2848			u16 last_idx, last_ring_idx;
2849
2850			last_idx = sw_cons + tx_buf->nr_frags + 1;
2851			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2852			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2853				last_idx++;
2854			}
2855			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2856				break;
2857			}
2858		}
2859
2860		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2861			skb_headlen(skb), PCI_DMA_TODEVICE);
2862
2863		tx_buf->skb = NULL;
2864		last = tx_buf->nr_frags;
2865
2866		for (i = 0; i < last; i++) {
2867			sw_cons = NEXT_TX_BD(sw_cons);
2868
2869			dma_unmap_page(&bp->pdev->dev,
2870				dma_unmap_addr(
2871					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2872					mapping),
2873				skb_shinfo(skb)->frags[i].size,
2874				PCI_DMA_TODEVICE);
2875		}
2876
2877		sw_cons = NEXT_TX_BD(sw_cons);
2878
2879		dev_kfree_skb(skb);
2880		tx_pkt++;
2881		if (tx_pkt == budget)
2882			break;
2883
2884		if (hw_cons == sw_cons)
2885			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2886	}
2887
2888	txr->hw_tx_cons = hw_cons;
2889	txr->tx_cons = sw_cons;
2890
2891	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2892	 * before checking for netif_tx_queue_stopped().  Without the
2893	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2894	 * will miss it and cause the queue to be stopped forever.
2895	 */
2896	smp_mb();
2897
2898	if (unlikely(netif_tx_queue_stopped(txq)) &&
2899		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2900		__netif_tx_lock(txq, smp_processor_id());
2901		if ((netif_tx_queue_stopped(txq)) &&
2902		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2903			netif_tx_wake_queue(txq);
2904		__netif_tx_unlock(txq);
2905	}
2906
2907	return tx_pkt;
2908}
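
/* Annotation (not part of the driver): the smp_mb() above pairs with a
 * mirror-image sequence on the transmit side.  A minimal sketch of that
 * producer half, under the same ring accounting (the real bnx2_start_xmit
 * is outside this excerpt):
 */
static void
bnx2_tx_maybe_stop_sketch(struct bnx2 *bp, struct bnx2_tx_ring_info *txr,
			  struct netdev_queue *txq)
{
	if (bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS) {
		netif_tx_stop_queue(txq);
		/* Order the stop against the re-read of the consumer index
		 * so a concurrent bnx2_tx_int() wake-up cannot be missed.
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
}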
2909
2910static void
2911bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2912			struct sk_buff *skb, int count)
2913{
2914	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2915	struct rx_bd *cons_bd, *prod_bd;
2916	int i;
2917	u16 hw_prod, prod;
2918	u16 cons = rxr->rx_pg_cons;
2919
2920	cons_rx_pg = &rxr->rx_pg_ring[cons];
2921
2922	/* The caller was unable to allocate a new page to replace the
2923	 * last one in the frags array, so we need to recycle that page
2924	 * and then free the skb.
2925	 */
2926	if (skb) {
2927		struct page *page;
2928		struct skb_shared_info *shinfo;
2929
2930		shinfo = skb_shinfo(skb);
2931		shinfo->nr_frags--;
2932		page = shinfo->frags[shinfo->nr_frags].page;
2933		shinfo->frags[shinfo->nr_frags].page = NULL;
2934
2935		cons_rx_pg->page = page;
2936		dev_kfree_skb(skb);
2937	}
2938
2939	hw_prod = rxr->rx_pg_prod;
2940
2941	for (i = 0; i < count; i++) {
2942		prod = RX_PG_RING_IDX(hw_prod);
2943
2944		prod_rx_pg = &rxr->rx_pg_ring[prod];
2945		cons_rx_pg = &rxr->rx_pg_ring[cons];
2946		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2947		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2948
2949		if (prod != cons) {
2950			prod_rx_pg->page = cons_rx_pg->page;
2951			cons_rx_pg->page = NULL;
2952			dma_unmap_addr_set(prod_rx_pg, mapping,
2953				dma_unmap_addr(cons_rx_pg, mapping));
2954
2955			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2956			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2957
2958		}
2959		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2960		hw_prod = NEXT_RX_BD(hw_prod);
2961	}
2962	rxr->rx_pg_prod = hw_prod;
2963	rxr->rx_pg_cons = cons;
2964}
2965
2966static inline void
2967bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2968		  struct sk_buff *skb, u16 cons, u16 prod)
2969{
2970	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2971	struct rx_bd *cons_bd, *prod_bd;
2972
2973	cons_rx_buf = &rxr->rx_buf_ring[cons];
2974	prod_rx_buf = &rxr->rx_buf_ring[prod];
2975
2976	dma_sync_single_for_device(&bp->pdev->dev,
2977		dma_unmap_addr(cons_rx_buf, mapping),
2978		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2979
2980	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2981
2982	prod_rx_buf->skb = skb;
2983	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2984
2985	if (cons == prod)
2986		return;
2987
2988	dma_unmap_addr_set(prod_rx_buf, mapping,
2989			dma_unmap_addr(cons_rx_buf, mapping));
2990
2991	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2992	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2993	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2994	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2995}
2996
2997static int
2998bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2999	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3000	    u32 ring_idx)
3001{
3002	int err;
3003	u16 prod = ring_idx & 0xffff;
3004
3005	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
3006	if (unlikely(err)) {
3007		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
3008		if (hdr_len) {
3009			unsigned int raw_len = len + 4;
3010			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3011
3012			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3013		}
3014		return err;
3015	}
3016
3017	skb_reserve(skb, BNX2_RX_OFFSET);
3018	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3019			 PCI_DMA_FROMDEVICE);
3020
3021	if (hdr_len == 0) {
3022		skb_put(skb, len);
3023		return 0;
3024	} else {
3025		unsigned int i, frag_len, frag_size, pages;
3026		struct sw_pg *rx_pg;
3027		u16 pg_cons = rxr->rx_pg_cons;
3028		u16 pg_prod = rxr->rx_pg_prod;
3029
3030		frag_size = len + 4 - hdr_len;
3031		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3032		skb_put(skb, hdr_len);
3033
3034		for (i = 0; i < pages; i++) {
3035			dma_addr_t mapping_old;
3036
3037			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3038			if (unlikely(frag_len <= 4)) {
3039				unsigned int tail = 4 - frag_len;
3040
3041				rxr->rx_pg_cons = pg_cons;
3042				rxr->rx_pg_prod = pg_prod;
3043				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3044							pages - i);
3045				skb->len -= tail;
3046				if (i == 0) {
3047					skb->tail -= tail;
3048				} else {
3049					skb_frag_t *frag =
3050						&skb_shinfo(skb)->frags[i - 1];
3051					frag->size -= tail;
3052					skb->data_len -= tail;
3053					skb->truesize -= tail;
3054				}
3055				return 0;
3056			}
3057			rx_pg = &rxr->rx_pg_ring[pg_cons];
3058
3059			/* Don't unmap yet.  If we're unable to allocate a new
3060			 * page, we need to recycle the page and the DMA addr.
3061			 */
3062			mapping_old = dma_unmap_addr(rx_pg, mapping);
3063			if (i == pages - 1)
3064				frag_len -= 4;
3065
3066			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3067			rx_pg->page = NULL;
3068
3069			err = bnx2_alloc_rx_page(bp, rxr,
3070						 RX_PG_RING_IDX(pg_prod),
3071						 GFP_ATOMIC);
3072			if (unlikely(err)) {
3073				rxr->rx_pg_cons = pg_cons;
3074				rxr->rx_pg_prod = pg_prod;
3075				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3076							pages - i);
3077				return err;
3078			}
3079
3080			dma_unmap_page(&bp->pdev->dev, mapping_old,
3081				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3082
3083			frag_size -= frag_len;
3084			skb->data_len += frag_len;
3085			skb->truesize += frag_len;
3086			skb->len += frag_len;
3087
3088			pg_prod = NEXT_RX_BD(pg_prod);
3089			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3090		}
3091		rxr->rx_pg_prod = pg_prod;
3092		rxr->rx_pg_cons = pg_cons;
3093	}
3094	return 0;
3095}
3096
3097static inline u16
3098bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3099{
3100	u16 cons;
3101
3102	/* Tell compiler that status block fields can change. */
3103	barrier();
3104	cons = *bnapi->hw_rx_cons_ptr;
3105	barrier();
3106	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3107		cons++;
3108	return cons;
3109}
3110
3111static int
3112bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3113{
3114	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3115	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3116	struct l2_fhdr *rx_hdr;
3117	int rx_pkt = 0, pg_ring_used = 0;
3118
3119	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3120	sw_cons = rxr->rx_cons;
3121	sw_prod = rxr->rx_prod;
3122
3123	/* Memory barrier necessary as speculative reads of the rx
3124	 * buffer can be ahead of the index in the status block
3125	 */
3126	rmb();
3127	while (sw_cons != hw_cons) {
3128		unsigned int len, hdr_len;
3129		u32 status;
3130		struct sw_bd *rx_buf, *next_rx_buf;
3131		struct sk_buff *skb;
3132		dma_addr_t dma_addr;
3133
3134		sw_ring_cons = RX_RING_IDX(sw_cons);
3135		sw_ring_prod = RX_RING_IDX(sw_prod);
3136
3137		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3138		skb = rx_buf->skb;
3139		prefetchw(skb);
3140
3141		next_rx_buf =
3142			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3143		prefetch(next_rx_buf->desc);
3144
3145		rx_buf->skb = NULL;
3146
3147		dma_addr = dma_unmap_addr(rx_buf, mapping);
3148
3149		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3150			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3151			PCI_DMA_FROMDEVICE);
3152
3153		rx_hdr = rx_buf->desc;
3154		len = rx_hdr->l2_fhdr_pkt_len;
3155		status = rx_hdr->l2_fhdr_status;
3156
3157		hdr_len = 0;
3158		if (status & L2_FHDR_STATUS_SPLIT) {
3159			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3160			pg_ring_used = 1;
3161		} else if (len > bp->rx_jumbo_thresh) {
3162			hdr_len = bp->rx_jumbo_thresh;
3163			pg_ring_used = 1;
3164		}
3165
3166		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3167				       L2_FHDR_ERRORS_PHY_DECODE |
3168				       L2_FHDR_ERRORS_ALIGNMENT |
3169				       L2_FHDR_ERRORS_TOO_SHORT |
3170				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3171
3172			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3173					  sw_ring_prod);
3174			if (pg_ring_used) {
3175				int pages;
3176
3177				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3178
3179				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3180			}
3181			goto next_rx;
3182		}
3183
3184		len -= 4;
3185
3186		if (len <= bp->rx_copy_thresh) {
3187			struct sk_buff *new_skb;
3188
3189			new_skb = netdev_alloc_skb(bp->dev, len + 6);
3190			if (new_skb == NULL) {
3191				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3192						  sw_ring_prod);
3193				goto next_rx;
3194			}
3195
3196			/* aligned copy */
3197			skb_copy_from_linear_data_offset(skb,
3198							 BNX2_RX_OFFSET - 6,
3199				      new_skb->data, len + 6);
3200			skb_reserve(new_skb, 6);
3201			skb_put(new_skb, len);
3202
3203			bnx2_reuse_rx_skb(bp, rxr, skb,
3204				sw_ring_cons, sw_ring_prod);
3205
3206			skb = new_skb;
3207		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3208			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3209			goto next_rx;
3210
3211		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3212		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3213			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
3214
3215		skb->protocol = eth_type_trans(skb, bp->dev);
3216
3217		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3218			(ntohs(skb->protocol) != ETH_P_8021Q)) {
3219
3220			dev_kfree_skb(skb);
3221			goto next_rx;
3222
3223		}
3224
3225		skb_checksum_none_assert(skb);
3226		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3227			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3228			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3229
3230			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3231					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3232				skb->ip_summed = CHECKSUM_UNNECESSARY;
3233		}
3234		if ((bp->dev->features & NETIF_F_RXHASH) &&
3235		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3236		     L2_FHDR_STATUS_USE_RXHASH))
3237			skb->rxhash = rx_hdr->l2_fhdr_hash;
3238
3239		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3240		napi_gro_receive(&bnapi->napi, skb);
3241		rx_pkt++;
3242
3243next_rx:
3244		sw_cons = NEXT_RX_BD(sw_cons);
3245		sw_prod = NEXT_RX_BD(sw_prod);
3246
3247		if (rx_pkt == budget)
3248			break;
3249
3250		/* Refresh hw_cons to see if there is new work */
3251		if (sw_cons == hw_cons) {
3252			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3253			rmb();
3254		}
3255	}
3256	rxr->rx_cons = sw_cons;
3257	rxr->rx_prod = sw_prod;
3258
3259	if (pg_ring_used)
3260		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3261
3262	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3263
3264	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3265
3266	mmiowb();
3267
3268	return rx_pkt;
3269
3270}
3271
3272/* MSI ISR - The only difference between this and the INTx ISR
3273 * is that the MSI interrupt is always serviced.
3274 */
3275static irqreturn_t
3276bnx2_msi(int irq, void *dev_instance)
3277{
3278	struct bnx2_napi *bnapi = dev_instance;
3279	struct bnx2 *bp = bnapi->bp;
3280
3281	prefetch(bnapi->status_blk.msi);
3282	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3283		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3284		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3285
3286	/* Return here if interrupt is disabled. */
3287	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3288		return IRQ_HANDLED;
3289
3290	napi_schedule(&bnapi->napi);
3291
3292	return IRQ_HANDLED;
3293}
3294
3295static irqreturn_t
3296bnx2_msi_1shot(int irq, void *dev_instance)
3297{
3298	struct bnx2_napi *bnapi = dev_instance;
3299	struct bnx2 *bp = bnapi->bp;
3300
3301	prefetch(bnapi->status_blk.msi);
3302
3303	/* Return here if interrupt is disabled. */
3304	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3305		return IRQ_HANDLED;
3306
3307	napi_schedule(&bnapi->napi);
3308
3309	return IRQ_HANDLED;
3310}
3311
3312static irqreturn_t
3313bnx2_interrupt(int irq, void *dev_instance)
3314{
3315	struct bnx2_napi *bnapi = dev_instance;
3316	struct bnx2 *bp = bnapi->bp;
3317	struct status_block *sblk = bnapi->status_blk.msi;
3318
3319	/* When using INTx, it is possible for the interrupt to arrive
3320	 * at the CPU before the status block write posted prior to the
3321	 * interrupt has landed. Reading a register will flush the status
3322	 * block. When using MSI, the MSI message will always complete
3323	 * after the status block write.
3324	 */
3325	if ((sblk->status_idx == bnapi->last_status_idx) &&
3326	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3327	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3328		return IRQ_NONE;
3329
3330	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3331		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3332		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3333
3334	/* Read back to deassert IRQ immediately to avoid too many
3335	 * spurious interrupts.
3336	 */
3337	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3338
3339	/* Return here if interrupt is shared and is disabled. */
3340	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3341		return IRQ_HANDLED;
3342
3343	if (napi_schedule_prep(&bnapi->napi)) {
3344		bnapi->last_status_idx = sblk->status_idx;
3345		__napi_schedule(&bnapi->napi);
3346	}
3347
3348	return IRQ_HANDLED;
3349}
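
/* Annotation: the read-back of BNX2_PCICFG_INT_ACK_CMD above also acts as
 * a PCI posted-write flush -- without it, the masking write could still be
 * in flight when the handler returns, leaving the level-triggered INTx
 * line asserted and re-firing the interrupt.
 */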
3350
3351static inline int
3352bnx2_has_fast_work(struct bnx2_napi *bnapi)
3353{
3354	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3355	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3356
3357	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3358	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3359		return 1;
3360	return 0;
3361}
3362
3363#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3364				 STATUS_ATTN_BITS_TIMER_ABORT)
3365
3366static inline int
3367bnx2_has_work(struct bnx2_napi *bnapi)
3368{
3369	struct status_block *sblk = bnapi->status_blk.msi;
3370
3371	if (bnx2_has_fast_work(bnapi))
3372		return 1;
3373
3374#ifdef BCM_CNIC
3375	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3376		return 1;
3377#endif
3378
3379	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3380	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3381		return 1;
3382
3383	return 0;
3384}
3385
3386static void
3387bnx2_chk_missed_msi(struct bnx2 *bp)
3388{
3389	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3390	u32 msi_ctrl;
3391
3392	if (bnx2_has_work(bnapi)) {
3393		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3394		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3395			return;
3396
3397		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3398			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3399			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3400			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3401			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3402		}
3403	}
3404
3405	bp->idle_chk_status_idx = bnapi->last_status_idx;
3406}
3407
3408#ifdef BCM_CNIC
3409static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3410{
3411	struct cnic_ops *c_ops;
3412
3413	if (!bnapi->cnic_present)
3414		return;
3415
3416	rcu_read_lock();
3417	c_ops = rcu_dereference(bp->cnic_ops);
3418	if (c_ops)
3419		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3420						      bnapi->status_blk.msi);
3421	rcu_read_unlock();
3422}
3423#endif
3424
3425static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3426{
3427	struct status_block *sblk = bnapi->status_blk.msi;
3428	u32 status_attn_bits = sblk->status_attn_bits;
3429	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3430
3431	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3432	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3433
3434		bnx2_phy_int(bp, bnapi);
3435
3436		/* This is needed to take care of transient status
3437		 * during link changes.
3438		 */
3439		REG_WR(bp, BNX2_HC_COMMAND,
3440		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3441		REG_RD(bp, BNX2_HC_COMMAND);
3442	}
3443}
3444
3445static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3446			  int work_done, int budget)
3447{
3448	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3449	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3450
3451	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3452		bnx2_tx_int(bp, bnapi, 0);
3453
3454	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3455		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3456
3457	return work_done;
3458}
3459
3460static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3461{
3462	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3463	struct bnx2 *bp = bnapi->bp;
3464	int work_done = 0;
3465	struct status_block_msix *sblk = bnapi->status_blk.msix;
3466
3467	while (1) {
3468		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3469		if (unlikely(work_done >= budget))
3470			break;
3471
3472		bnapi->last_status_idx = sblk->status_idx;
3473		/* status idx must be read before checking for more work. */
3474		rmb();
3475		if (likely(!bnx2_has_fast_work(bnapi))) {
3476
3477			napi_complete(napi);
3478			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3479			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3480			       bnapi->last_status_idx);
3481			break;
3482		}
3483	}
3484	return work_done;
3485}
3486
3487static int bnx2_poll(struct napi_struct *napi, int budget)
3488{
3489	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3490	struct bnx2 *bp = bnapi->bp;
3491	int work_done = 0;
3492	struct status_block *sblk = bnapi->status_blk.msi;
3493
3494	while (1) {
3495		bnx2_poll_link(bp, bnapi);
3496
3497		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3498
3499#ifdef BCM_CNIC
3500		bnx2_poll_cnic(bp, bnapi);
3501#endif
3502
3503		/* bnapi->last_status_idx is used below to tell the hw how
3504		 * much work has been processed, so we must read it before
3505		 * checking for more work.
3506		 */
3507		bnapi->last_status_idx = sblk->status_idx;
3508
3509		if (unlikely(work_done >= budget))
3510			break;
3511
3512		rmb();
3513		if (likely(!bnx2_has_work(bnapi))) {
3514			napi_complete(napi);
3515			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3516				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3517				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3518				       bnapi->last_status_idx);
3519				break;
3520			}
3521			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3522			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3523			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3524			       bnapi->last_status_idx);
3525
3526			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3527			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3528			       bnapi->last_status_idx);
3529			break;
3530		}
3531	}
3532
3533	return work_done;
3534}
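
/* Annotation: bnx2_poll() and bnx2_poll_msix() above share one contract --
 * keep processing until the budget is exhausted (and stay scheduled) or no
 * work remains, and only then napi_complete() and re-arm the interrupt by
 * writing the last seen status index to BNX2_PCICFG_INT_ACK_CMD.  The
 * rmb() between recording last_status_idx and the final has-work check
 * closes the race with a status block update landing in between.
 */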
3535
3536/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3537 * from set_multicast.
3538 */
3539static void
3540bnx2_set_rx_mode(struct net_device *dev)
3541{
3542	struct bnx2 *bp = netdev_priv(dev);
3543	u32 rx_mode, sort_mode;
3544	struct netdev_hw_addr *ha;
3545	int i;
3546
3547	if (!netif_running(dev))
3548		return;
3549
3550	spin_lock_bh(&bp->phy_lock);
3551
3552	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3553				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3554	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3555	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
3556	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3557		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3558	if (dev->flags & IFF_PROMISC) {
3559		/* Promiscuous mode. */
3560		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3561		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3562			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3563	}
3564	else if (dev->flags & IFF_ALLMULTI) {
3565		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3566			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567			       0xffffffff);
3568		}
3569		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3570	}
3571	else {
3572		/* Accept one or more multicast(s). */
3573		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3574		u32 regidx;
3575		u32 bit;
3576		u32 crc;
3577
3578		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3579
3580		netdev_for_each_mc_addr(ha, dev) {
3581			crc = ether_crc_le(ETH_ALEN, ha->addr);
3582			bit = crc & 0xff;
3583			regidx = (bit & 0xe0) >> 5;
3584			bit &= 0x1f;
3585			mc_filter[regidx] |= (1 << bit);
3586		}
3587
3588		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3589			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3590			       mc_filter[i]);
3591		}
3592
3593		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3594	}
3595
3596	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3597		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3598		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3599			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3600	} else if (!(dev->flags & IFF_PROMISC)) {
3601		/* Add all entries to the match filter list */
3602		i = 0;
3603		netdev_for_each_uc_addr(ha, dev) {
3604			bnx2_set_mac_addr(bp, ha->addr,
3605					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3606			sort_mode |= (1 <<
3607				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3608			i++;
3609		}
3610
3611	}
3612
3613	if (rx_mode != bp->rx_mode) {
3614		bp->rx_mode = rx_mode;
3615		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3616	}
3617
3618	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3619	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3620	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3621
3622	spin_unlock_bh(&bp->phy_lock);
3623}
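
/* Illustration only (not part of the driver): the multicast hash computed
 * in bnx2_set_rx_mode() above, as a standalone helper.  Bits 7:5 of the
 * low CRC byte pick one of the NUM_MC_HASH_REGISTERS hash registers;
 * bits 4:0 pick the bit within it.
 */
static void
bnx2_mc_hash_sketch(const u8 *addr, u32 *regidx, u32 *bit)
{
	u32 crc = ether_crc_le(ETH_ALEN, addr);

	*bit = crc & 0xff;
	*regidx = (*bit & 0xe0) >> 5;
	*bit &= 0x1f;
}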
3624
3625static int __devinit
3626check_fw_section(const struct firmware *fw,
3627		 const struct bnx2_fw_file_section *section,
3628		 u32 alignment, bool non_empty)
3629{
3630	u32 offset = be32_to_cpu(section->offset);
3631	u32 len = be32_to_cpu(section->len);
3632
3633	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3634		return -EINVAL;
3635	if ((non_empty && len == 0) || len > fw->size - offset ||
3636	    len & (alignment - 1))
3637		return -EINVAL;
3638	return 0;
3639}
3640
3641static int __devinit
3642check_mips_fw_entry(const struct firmware *fw,
3643		    const struct bnx2_mips_fw_file_entry *entry)
3644{
3645	if (check_fw_section(fw, &entry->text, 4, true) ||
3646	    check_fw_section(fw, &entry->data, 4, false) ||
3647	    check_fw_section(fw, &entry->rodata, 4, false))
3648		return -EINVAL;
3649	return 0;
3650}
3651
3652static int __devinit
3653bnx2_request_firmware(struct bnx2 *bp)
3654{
3655	const char *mips_fw_file, *rv2p_fw_file;
3656	const struct bnx2_mips_fw_file *mips_fw;
3657	const struct bnx2_rv2p_fw_file *rv2p_fw;
3658	int rc;
3659
3660	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3661		mips_fw_file = FW_MIPS_FILE_09;
3662		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3663		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3664			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3665		else
3666			rv2p_fw_file = FW_RV2P_FILE_09;
3667	} else {
3668		mips_fw_file = FW_MIPS_FILE_06;
3669		rv2p_fw_file = FW_RV2P_FILE_06;
3670	}
3671
3672	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3673	if (rc) {
3674		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3675		return rc;
3676	}
3677
3678	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3679	if (rc) {
3680		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3681		return rc;
3682	}
3683	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3684	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3685	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3686	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3687	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3688	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3689	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3690	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3691		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3692		return -EINVAL;
3693	}
3694	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3695	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3696	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3697		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3698		return -EINVAL;
3699	}
3700
3701	return 0;
3702}
3703
3704static u32
3705rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3706{
3707	switch (idx) {
3708	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3709		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3710		rv2p_code |= RV2P_BD_PAGE_SIZE;
3711		break;
3712	}
3713	return rv2p_code;
3714}
3715
3716static int
3717load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3718	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3719{
3720	u32 rv2p_code_len, file_offset;
3721	__be32 *rv2p_code;
3722	int i;
3723	u32 val, cmd, addr;
3724
3725	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3726	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3727
3728	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3729
3730	if (rv2p_proc == RV2P_PROC1) {
3731		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3732		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3733	} else {
3734		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3735		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3736	}
3737
3738	for (i = 0; i < rv2p_code_len; i += 8) {
3739		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3740		rv2p_code++;
3741		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3742		rv2p_code++;
3743
3744		val = (i / 8) | cmd;
3745		REG_WR(bp, addr, val);
3746	}
3747
3748	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3749	for (i = 0; i < 8; i++) {
3750		u32 loc, code;
3751
3752		loc = be32_to_cpu(fw_entry->fixup[i]);
3753		if (loc && ((loc * 4) < rv2p_code_len)) {
3754			code = be32_to_cpu(*(rv2p_code + loc - 1));
3755			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3756			code = be32_to_cpu(*(rv2p_code + loc));
3757			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3758			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3759
3760			val = (loc / 2) | cmd;
3761			REG_WR(bp, addr, val);
3762		}
3763	}
3764
3765	/* Reset the processor, un-stall is done later. */
3766	if (rv2p_proc == RV2P_PROC1) {
3767		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3768	}
3769	else {
3770		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3771	}
3772
3773	return 0;
3774}
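
/* Annotation: each RV2P instruction is 64 bits, staged through
 * BNX2_RV2P_INSTR_HIGH/LOW and committed by writing the instruction index
 * (byte offset / 8) together with the RDWR command bit.  The second pass
 * re-walks up to eight fixup locations from the firmware header;
 * rv2p_fw_fixup() above currently patches only the BD page size.
 */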
3775
3776static int
3777load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3778	    const struct bnx2_mips_fw_file_entry *fw_entry)
3779{
3780	u32 addr, len, file_offset;
3781	__be32 *data;
3782	u32 offset;
3783	u32 val;
3784
3785	/* Halt the CPU. */
3786	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3787	val |= cpu_reg->mode_value_halt;
3788	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3789	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3790
3791	/* Load the Text area. */
3792	addr = be32_to_cpu(fw_entry->text.addr);
3793	len = be32_to_cpu(fw_entry->text.len);
3794	file_offset = be32_to_cpu(fw_entry->text.offset);
3795	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3796
3797	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3798	if (len) {
3799		int j;
3800
3801		for (j = 0; j < (len / 4); j++, offset += 4)
3802			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3803	}
3804
3805	/* Load the Data area. */
3806	addr = be32_to_cpu(fw_entry->data.addr);
3807	len = be32_to_cpu(fw_entry->data.len);
3808	file_offset = be32_to_cpu(fw_entry->data.offset);
3809	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3810
3811	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3812	if (len) {
3813		int j;
3814
3815		for (j = 0; j < (len / 4); j++, offset += 4)
3816			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3817	}
3818
3819	/* Load the Read-Only area. */
3820	addr = be32_to_cpu(fw_entry->rodata.addr);
3821	len = be32_to_cpu(fw_entry->rodata.len);
3822	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3823	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3824
3825	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3826	if (len) {
3827		int j;
3828
3829		for (j = 0; j < (len / 4); j++, offset += 4)
3830			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3831	}
3832
3833	/* Clear the pre-fetch instruction. */
3834	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3835
3836	val = be32_to_cpu(fw_entry->start_addr);
3837	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3838
3839	/* Start the CPU. */
3840	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3841	val &= ~cpu_reg->mode_value_halt;
3842	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3843	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3844
3845	return 0;
3846}
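
/* Illustration only (not part of the driver): the three identical section
 * loads in load_cpu_fw() above, factored into a hypothetical helper.
 */
static void
load_cpu_fw_section_sketch(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
			   const struct bnx2_fw_file_section *sec)
{
	u32 addr = be32_to_cpu(sec->addr);
	u32 len = be32_to_cpu(sec->len);
	u32 file_offset = be32_to_cpu(sec->offset);
	__be32 *data = (__be32 *)(bp->mips_firmware->data + file_offset);
	u32 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
	u32 j;

	for (j = 0; j < (len / 4); j++, offset += 4)
		bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
}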
3847
3848static int
3849bnx2_init_cpus(struct bnx2 *bp)
3850{
3851	const struct bnx2_mips_fw_file *mips_fw =
3852		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3853	const struct bnx2_rv2p_fw_file *rv2p_fw =
3854		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3855	int rc;
3856
3857	/* Initialize the RV2P processor. */
3858	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3859	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3860
3861	/* Initialize the RX Processor. */
3862	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3863	if (rc)
3864		goto init_cpu_err;
3865
3866	/* Initialize the TX Processor. */
3867	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3868	if (rc)
3869		goto init_cpu_err;
3870
3871	/* Initialize the TX Patch-up Processor. */
3872	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3873	if (rc)
3874		goto init_cpu_err;
3875
3876	/* Initialize the Completion Processor. */
3877	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3878	if (rc)
3879		goto init_cpu_err;
3880
3881	/* Initialize the Command Processor. */
3882	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3883
3884init_cpu_err:
3885	return rc;
3886}
3887
3888static int
3889bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3890{
3891	u16 pmcsr;
3892
3893	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3894
3895	switch (state) {
3896	case PCI_D0: {
3897		u32 val;
3898
3899		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3900			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3901			PCI_PM_CTRL_PME_STATUS);
3902
3903		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3904			/* delay required during transition out of D3hot */
3905			msleep(20);
3906
3907		val = REG_RD(bp, BNX2_EMAC_MODE);
3908		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3909		val &= ~BNX2_EMAC_MODE_MPKT;
3910		REG_WR(bp, BNX2_EMAC_MODE, val);
3911
3912		val = REG_RD(bp, BNX2_RPM_CONFIG);
3913		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3914		REG_WR(bp, BNX2_RPM_CONFIG, val);
3915		break;
3916	}
3917	case PCI_D3hot: {
3918		int i;
3919		u32 val, wol_msg;
3920
3921		if (bp->wol) {
3922			u32 advertising;
3923			u8 autoneg;
3924
3925			autoneg = bp->autoneg;
3926			advertising = bp->advertising;
3927
3928			if (bp->phy_port == PORT_TP) {
3929				bp->autoneg = AUTONEG_SPEED;
3930				bp->advertising = ADVERTISED_10baseT_Half |
3931					ADVERTISED_10baseT_Full |
3932					ADVERTISED_100baseT_Half |
3933					ADVERTISED_100baseT_Full |
3934					ADVERTISED_Autoneg;
3935			}
3936
3937			spin_lock_bh(&bp->phy_lock);
3938			bnx2_setup_phy(bp, bp->phy_port);
3939			spin_unlock_bh(&bp->phy_lock);
3940
3941			bp->autoneg = autoneg;
3942			bp->advertising = advertising;
3943
3944			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3945
3946			val = REG_RD(bp, BNX2_EMAC_MODE);
3947
3948			/* Enable port mode. */
3949			val &= ~BNX2_EMAC_MODE_PORT;
3950			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3951			       BNX2_EMAC_MODE_ACPI_RCVD |
3952			       BNX2_EMAC_MODE_MPKT;
3953			if (bp->phy_port == PORT_TP)
3954				val |= BNX2_EMAC_MODE_PORT_MII;
3955			else {
3956				val |= BNX2_EMAC_MODE_PORT_GMII;
3957				if (bp->line_speed == SPEED_2500)
3958					val |= BNX2_EMAC_MODE_25G_MODE;
3959			}
3960
3961			REG_WR(bp, BNX2_EMAC_MODE, val);
3962
3963			/* receive all multicast */
3964			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3965				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3966				       0xffffffff);
3967			}
3968			REG_WR(bp, BNX2_EMAC_RX_MODE,
3969			       BNX2_EMAC_RX_MODE_SORT_MODE);
3970
3971			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3972			      BNX2_RPM_SORT_USER0_MC_EN;
3973			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3974			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3975			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3976			       BNX2_RPM_SORT_USER0_ENA);
3977
3978			/* Need to enable EMAC and RPM for WOL. */
3979			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3980			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3981			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3982			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3983
3984			val = REG_RD(bp, BNX2_RPM_CONFIG);
3985			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3986			REG_WR(bp, BNX2_RPM_CONFIG, val);
3987
3988			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3989		}
3990		else {
3991			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3992		}
3993
3994		if (!(bp->flags & BNX2_FLAG_NO_WOL))
3995			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3996				     1, 0);
3997
3998		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
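		/* Bits 1:0 of PMCSR select the power state (0 = D0,
		 * 3 = D3hot per the PCI PM spec), so "pmcsr |= 3" below
		 * requests D3hot; PCI_PM_CTRL_PME_ENABLE then arms PME
		 * assertion so a wake packet can pull the chip back up.
		 */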
3999		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4000		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4001
4002			if (bp->wol)
4003				pmcsr |= 3;
4004		}
4005		else {
4006			pmcsr |= 3;
4007		}
4008		if (bp->wol) {
4009			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4010		}
4011		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4012				      pmcsr);
4013
4014		/* No more memory access after this point until the
4015		 * device is brought back to D0.
4016		 */
4017		udelay(50);
4018		break;
4019	}
4020	default:
4021		return -EINVAL;
4022	}
4023	return 0;
4024}
4025
4026static int
4027bnx2_acquire_nvram_lock(struct bnx2 *bp)
4028{
4029	u32 val;
4030	int j;
4031
4032	/* Request access to the flash interface. */
4033	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4034	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4035		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4036		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4037			break;
4038
4039		udelay(5);
4040	}
4041
4042	if (j >= NVRAM_TIMEOUT_COUNT)
4043		return -EBUSY;
4044
4045	return 0;
4046}
4047
4048static int
4049bnx2_release_nvram_lock(struct bnx2 *bp)
4050{
4051	int j;
4052	u32 val;
4053
4054	/* Relinquish nvram interface. */
4055	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4056
4057	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4058		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4059		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4060			break;
4061
4062		udelay(5);
4063	}
4064
4065	if (j >= NVRAM_TIMEOUT_COUNT)
4066		return -EBUSY;
4067
4068	return 0;
4069}
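
/* Typical usage of the arbitration pair above, sketched here (and
 * mirrored by bnx2_nvram_read() further down):
 *
 *	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
 *		return rc;
 *	bnx2_enable_nvram_access(bp);
 *	... issue NVM commands ...
 *	bnx2_disable_nvram_access(bp);
 *	bnx2_release_nvram_lock(bp);
 */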
4070
4071
4072static int
4073bnx2_enable_nvram_write(struct bnx2 *bp)
4074{
4075	u32 val;
4076
4077	val = REG_RD(bp, BNX2_MISC_CFG);
4078	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4079
4080	if (bp->flash_info->flags & BNX2_NV_WREN) {
4081		int j;
4082
4083		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4084		REG_WR(bp, BNX2_NVM_COMMAND,
4085		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4086
4087		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088			udelay(5);
4089
4090			val = REG_RD(bp, BNX2_NVM_COMMAND);
4091			if (val & BNX2_NVM_COMMAND_DONE)
4092				break;
4093		}
4094
4095		if (j >= NVRAM_TIMEOUT_COUNT)
4096			return -EBUSY;
4097	}
4098	return 0;
4099}
4100
4101static void
4102bnx2_disable_nvram_write(struct bnx2 *bp)
4103{
4104	u32 val;
4105
4106	val = REG_RD(bp, BNX2_MISC_CFG);
4107	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4108}
4109
4110
4111static void
4112bnx2_enable_nvram_access(struct bnx2 *bp)
4113{
4114	u32 val;
4115
4116	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4117	/* Enable both bits, even on read. */
4118	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4119	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4120}
4121
4122static void
4123bnx2_disable_nvram_access(struct bnx2 *bp)
4124{
4125	u32 val;
4126
4127	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4128	/* Disable both bits, even after read. */
4129	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4130		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4131			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4132}
4133
4134static int
4135bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4136{
4137	u32 cmd;
4138	int j;
4139
4140	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4141		/* Buffered flash, no erase needed */
4142		return 0;
4143
4144	/* Build an erase command */
4145	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4146	      BNX2_NVM_COMMAND_DOIT;
4147
4148	/* Need to clear DONE bit separately. */
4149	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4150
4151	/* Address of the NVRAM page to erase. */
4152	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4153
4154	/* Issue an erase command. */
4155	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4156
4157	/* Wait for completion. */
4158	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4159		u32 val;
4160
4161		udelay(5);
4162
4163		val = REG_RD(bp, BNX2_NVM_COMMAND);
4164		if (val & BNX2_NVM_COMMAND_DONE)
4165			break;
4166	}
4167
4168	if (j >= NVRAM_TIMEOUT_COUNT)
4169		return -EBUSY;
4170
4171	return 0;
4172}
4173
4174static int
4175bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4176{
4177	u32 cmd;
4178	int j;
4179
4180	/* Build the command word. */
4181	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4182
4183	/* Translate the offset for a buffered flash; not needed for 5709. */
4184	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4185		offset = ((offset / bp->flash_info->page_size) <<
4186			   bp->flash_info->page_bits) +
4187			  (offset % bp->flash_info->page_size);
4188	}
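	/* Worked example of the translation above, assuming an
	 * illustrative 264-byte page addressed through 9 page bits:
	 * offset 600 -> page 2, byte 72 -> (2 << 9) + 72 = 0x448.
	 */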
4189
4190	/* Need to clear DONE bit separately. */
4191	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4192
4193	/* Address of the NVRAM to read from. */
4194	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4195
4196	/* Issue a read command. */
4197	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4198
4199	/* Wait for completion. */
4200	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4201		u32 val;
4202
4203		udelay(5);
4204
4205		val = REG_RD(bp, BNX2_NVM_COMMAND);
4206		if (val & BNX2_NVM_COMMAND_DONE) {
4207			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4208			memcpy(ret_val, &v, 4);
4209			break;
4210		}
4211	}
4212	if (j >= NVRAM_TIMEOUT_COUNT)
4213		return -EBUSY;
4214
4215	return 0;
4216}
4217
4218
4219static int
4220bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4221{
4222	u32 cmd;
4223	__be32 val32;
4224	int j;
4225
4226	/* Build the command word. */
4227	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4228
4229	/* Translate the offset for a buffered flash; not needed for 5709. */
4230	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4231		offset = ((offset / bp->flash_info->page_size) <<
4232			  bp->flash_info->page_bits) +
4233			 (offset % bp->flash_info->page_size);
4234	}
4235
4236	/* Need to clear DONE bit separately. */
4237	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4238
4239	memcpy(&val32, val, 4);
4240
4241	/* Write the data. */
4242	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4243
4244	/* Address of the NVRAM to write to. */
4245	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4246
4247	/* Issue the write command. */
4248	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4249
4250	/* Wait for completion. */
4251	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4252		udelay(5);
4253
4254		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4255			break;
4256	}
4257	if (j >= NVRAM_TIMEOUT_COUNT)
4258		return -EBUSY;
4259
4260	return 0;
4261}
4262
4263static int
4264bnx2_init_nvram(struct bnx2 *bp)
4265{
4266	u32 val;
4267	int j, entry_count, rc = 0;
4268	const struct flash_spec *flash;
4269
4270	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4271		bp->flash_info = &flash_5709;
4272		goto get_flash_size;
4273	}
4274
4275	/* Determine the selected interface. */
4276	val = REG_RD(bp, BNX2_NVM_CFG1);
4277
4278	entry_count = ARRAY_SIZE(flash_table);
4279
4280	if (val & 0x40000000) {
4281
4282		/* Flash interface has been reconfigured */
4283		for (j = 0, flash = &flash_table[0]; j < entry_count;
4284		     j++, flash++) {
4285			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4286			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4287				bp->flash_info = flash;
4288				break;
4289			}
4290		}
4291	}
4292	else {
4293		u32 mask;
4294		/* Has not been reconfigured yet */
4295
4296		if (val & (1 << 23))
4297			mask = FLASH_BACKUP_STRAP_MASK;
4298		else
4299			mask = FLASH_STRAP_MASK;
4300
4301		for (j = 0, flash = &flash_table[0]; j < entry_count;
4302			j++, flash++) {
4303
4304			if ((val & mask) == (flash->strapping & mask)) {
4305				bp->flash_info = flash;
4306
4307				/* Request access to the flash interface. */
4308				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4309					return rc;
4310
4311				/* Enable access to flash interface */
4312				bnx2_enable_nvram_access(bp);
4313
4314				/* Reconfigure the flash interface */
4315				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4316				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4317				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4318				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4319
4320				/* Disable access to flash interface */
4321				bnx2_disable_nvram_access(bp);
4322				bnx2_release_nvram_lock(bp);
4323
4324				break;
4325			}
4326		}
4327	} /* if (val & 0x40000000) */
4328
4329	if (j == entry_count) {
4330		bp->flash_info = NULL;
4331		pr_alert("Unknown flash/EEPROM type\n");
4332		return -ENODEV;
4333	}
4334
4335get_flash_size:
4336	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4337	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4338	if (val)
4339		bp->flash_size = val;
4340	else
4341		bp->flash_size = bp->flash_info->total_size;
4342
4343	return rc;
4344}
4345
4346static int
4347bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4348		int buf_size)
4349{
4350	int rc = 0;
4351	u32 cmd_flags, offset32, len32, extra;
4352
4353	if (buf_size == 0)
4354		return 0;
4355
4356	/* Request access to the flash interface. */
4357	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4358		return rc;
4359
4360	/* Enable access to flash interface */
4361	bnx2_enable_nvram_access(bp);
4362
4363	len32 = buf_size;
4364	offset32 = offset;
4365	extra = 0;
4366
4367	cmd_flags = 0;
4368
4369	if (offset32 & 3) {
4370		u8 buf[4];
4371		u32 pre_len;
4372
4373		offset32 &= ~3;
4374		pre_len = 4 - (offset & 3);
4375
4376		if (pre_len >= len32) {
4377			pre_len = len32;
4378			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4379				    BNX2_NVM_COMMAND_LAST;
4380		}
4381		else {
4382			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383		}
4384
4385		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4386
4387		if (rc)
4388			return rc;
4389
4390		memcpy(ret_buf, buf + (offset & 3), pre_len);
4391
4392		offset32 += 4;
4393		ret_buf += pre_len;
4394		len32 -= pre_len;
4395	}
4396	if (len32 & 3) {
4397		extra = 4 - (len32 & 3);
4398		len32 = (len32 + 4) & ~3;
4399	}
4400
4401	if (len32 == 4) {
4402		u8 buf[4];
4403
4404		if (cmd_flags)
4405			cmd_flags = BNX2_NVM_COMMAND_LAST;
4406		else
4407			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4408				    BNX2_NVM_COMMAND_LAST;
4409
4410		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4411
4412		memcpy(ret_buf, buf, 4 - extra);
4413	}
4414	else if (len32 > 0) {
4415		u8 buf[4];
4416
4417		/* Read the first word. */
4418		if (cmd_flags)
4419			cmd_flags = 0;
4420		else
4421			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4422
4423		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4424
4425		/* Advance to the next dword. */
4426		offset32 += 4;
4427		ret_buf += 4;
4428		len32 -= 4;
4429
4430		while (len32 > 4 && rc == 0) {
4431			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4432
4433			/* Advance to the next dword. */
4434			offset32 += 4;
4435			ret_buf += 4;
4436			len32 -= 4;
4437		}
4438
4439		if (rc)
4440			return rc;
4441
4442		cmd_flags = BNX2_NVM_COMMAND_LAST;
4443		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4444
4445		memcpy(ret_buf, buf, 4 - extra);
4446	}
4447
4448	/* Disable access to flash interface */
4449	bnx2_disable_nvram_access(bp);
4450
4451	bnx2_release_nvram_lock(bp);
4452
4453	return rc;
4454}
4455
4456static int
4457bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4458		int buf_size)
4459{
4460	u32 written, offset32, len32;
4461	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4462	int rc = 0;
4463	int align_start, align_end;
4464
4465	buf = data_buf;
4466	offset32 = offset;
4467	len32 = buf_size;
4468	align_start = align_end = 0;
4469
4470	if ((align_start = (offset32 & 3))) {
4471		offset32 &= ~3;
4472		len32 += align_start;
4473		if (len32 < 4)
4474			len32 = 4;
4475		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4476			return rc;
4477	}
4478
4479	if (len32 & 3) {
4480		align_end = 4 - (len32 & 3);
4481		len32 += align_end;
4482		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4483			return rc;
4484	}
4485
4486	if (align_start || align_end) {
4487		align_buf = kmalloc(len32, GFP_KERNEL);
4488		if (align_buf == NULL)
4489			return -ENOMEM;
4490		if (align_start) {
4491			memcpy(align_buf, start, 4);
4492		}
4493		if (align_end) {
4494			memcpy(align_buf + len32 - 4, end, 4);
4495		}
4496		memcpy(align_buf + align_start, data_buf, buf_size);
4497		buf = align_buf;
4498	}
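	/* Illustration of the fix-up above: writing 5 bytes at offset 6
	 * yields align_start = 2, align_end = 1 and the dword span
	 * [4, 12), so align_buf holds
	 *
	 *	[ 2 bytes kept from start | 5 new bytes | 1 byte from end ]
	 *
	 * where start[] and end[] are the dwords read back from flash so
	 * the neighbouring bytes survive the rewrite.
	 */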
4499
4500	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4501		flash_buffer = kmalloc(264, GFP_KERNEL);
4502		if (flash_buffer == NULL) {
4503			rc = -ENOMEM;
4504			goto nvram_write_end;
4505		}
4506	}
4507
4508	written = 0;
4509	while ((written < len32) && (rc == 0)) {
4510		u32 page_start, page_end, data_start, data_end;
4511		u32 addr, cmd_flags;
4512		int i;
4513
4514		/* Find the page_start addr */
4515		page_start = offset32 + written;
4516		page_start -= (page_start % bp->flash_info->page_size);
4517		/* Find the page_end addr */
4518		page_end = page_start + bp->flash_info->page_size;
4519		/* Find the data_start addr */
4520		data_start = (written == 0) ? offset32 : page_start;
4521		/* Find the data_end addr */
4522		data_end = (page_end > offset32 + len32) ?
4523			(offset32 + len32) : page_end;
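		/* Example with a hypothetical 256-byte page: writing 100
		 * bytes at offset 600 gives, on the first pass,
		 * page_start = 512, page_end = 768, data_start = 600 and
		 * data_end = 700; bytes 512-599 and 700-767 come from
		 * flash_buffer, bytes 600-699 from buf.
		 */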
4524
4525		/* Request access to the flash interface. */
4526		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4527			goto nvram_write_end;
4528
4529		/* Enable access to flash interface */
4530		bnx2_enable_nvram_access(bp);
4531
4532		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4533		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4534			int j;
4535
4536			/* Read the whole page into the buffer
4537			 * (non-buffered flash only) */
4538			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4539				if (j == (bp->flash_info->page_size - 4)) {
4540					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4541				}
4542				rc = bnx2_nvram_read_dword(bp,
4543					page_start + j,
4544					&flash_buffer[j],
4545					cmd_flags);
4546
4547				if (rc)
4548					goto nvram_write_end;
4549
4550				cmd_flags = 0;
4551			}
4552		}
4553
4554		/* Enable writes to flash interface (unlock write-protect) */
4555		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4556			goto nvram_write_end;
4557
4558		/* Loop to write back the buffer data from page_start to
4559		 * data_start */
4560		i = 0;
4561		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4562			/* Erase the page */
4563			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4564				goto nvram_write_end;
4565
4566			/* Re-enable the write again for the actual write */
4567			bnx2_enable_nvram_write(bp);
4568
4569			for (addr = page_start; addr < data_start;
4570				addr += 4, i += 4) {
4571
4572				rc = bnx2_nvram_write_dword(bp, addr,
4573					&flash_buffer[i], cmd_flags);
4574
4575				if (rc != 0)
4576					goto nvram_write_end;
4577
4578				cmd_flags = 0;
4579			}
4580		}
4581
4582		/* Loop to write the new data from data_start to data_end */
4583		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4584			if ((addr == page_end - 4) ||
4585				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4586				 (addr == data_end - 4))) {
4587
4588				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4589			}
4590			rc = bnx2_nvram_write_dword(bp, addr, buf,
4591				cmd_flags);
4592
4593			if (rc != 0)
4594				goto nvram_write_end;
4595
4596			cmd_flags = 0;
4597			buf += 4;
4598		}
4599
4600		/* Loop to write back the buffer data from data_end
4601		 * to page_end */
4602		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4603			for (addr = data_end; addr < page_end;
4604				addr += 4, i += 4) {
4605
4606				if (addr == page_end - 4) {
4607					cmd_flags = BNX2_NVM_COMMAND_LAST;
4608				}
4609				rc = bnx2_nvram_write_dword(bp, addr,
4610					&flash_buffer[i], cmd_flags);
4611
4612				if (rc != 0)
4613					goto nvram_write_end;
4614
4615				cmd_flags = 0;
4616			}
4617		}
4618
4619		/* Disable writes to flash interface (lock write-protect) */
4620		bnx2_disable_nvram_write(bp);
4621
4622		/* Disable access to flash interface */
4623		bnx2_disable_nvram_access(bp);
4624		bnx2_release_nvram_lock(bp);
4625
4626		/* Increment written */
4627		written += data_end - data_start;
4628	}
4629
4630nvram_write_end:
4631	kfree(flash_buffer);
4632	kfree(align_buf);
4633	return rc;
4634}
4635
4636static void
4637bnx2_init_fw_cap(struct bnx2 *bp)
4638{
4639	u32 val, sig = 0;
4640
4641	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4642	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4643
4644	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4645		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4646
4647	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4648	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4649		return;
4650
4651	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4652		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4653		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4654	}
4655
4656	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4657	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4658		u32 link;
4659
4660		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4661
4662		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4663		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4664			bp->phy_port = PORT_FIBRE;
4665		else
4666			bp->phy_port = PORT_TP;
4667
4668		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4669		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4670	}
4671
4672	if (netif_running(bp->dev) && sig)
4673		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4674}
4675
4676static void
4677bnx2_setup_msix_tbl(struct bnx2 *bp)
4678{
4679	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4680
4681	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4682	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4683}
4684
4685static int
4686bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4687{
4688	u32 val;
4689	int i, rc = 0;
4690	u8 old_port;
4691
4692	/* Wait for the current PCI transaction to complete before
4693	 * issuing a reset. */
4694	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4695	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4696		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4697		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4698		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4699		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4700		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4701		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4702		udelay(5);
4703	} else {  /* 5709 */
4704		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4705		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4706		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4707		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4708
4709		for (i = 0; i < 100; i++) {
4710			msleep(1);
4711			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4712			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4713				break;
4714		}
4715	}
4716
4717	/* Wait for the firmware to tell us it is ok to issue a reset. */
4718	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4719
4720	/* Deposit a driver reset signature so the firmware knows that
4721	 * this is a soft reset. */
4722	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4723		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4724
4725	/* Do a dummy read to force the chip to complete all current transactions
4726	 * before we issue a reset. */
4727	val = REG_RD(bp, BNX2_MISC_ID);
4728
4729	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4730		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4731		REG_RD(bp, BNX2_MISC_COMMAND);
4732		udelay(5);
4733
4734		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4735		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4736
4737		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4738
4739	} else {
4740		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4741		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4742		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4743
4744		/* Chip reset. */
4745		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4746
4747		/* Reading back any register after chip reset will hang the
4748		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4749		 * of margin for write posting.
4750		 */
4751		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4752		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4753			msleep(20);
4754
4755		/* Reset takes approximately 30 usec */
4756		for (i = 0; i < 10; i++) {
4757			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4758			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4759				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4760				break;
4761			udelay(10);
4762		}
4763
4764		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4765			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4766			pr_err("Chip reset did not complete\n");
4767			return -EBUSY;
4768		}
4769	}
4770
4771	/* Make sure byte swapping is properly configured. */
4772	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4773	if (val != 0x01020304) {
4774		pr_err("Chip not in correct endian mode\n");
4775		return -ENODEV;
4776	}
4777
4778	/* Wait for the firmware to finish its initialization. */
4779	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4780	if (rc)
4781		return rc;
4782
4783	spin_lock_bh(&bp->phy_lock);
4784	old_port = bp->phy_port;
4785	bnx2_init_fw_cap(bp);
4786	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4787	    old_port != bp->phy_port)
4788		bnx2_set_default_remote_link(bp);
4789	spin_unlock_bh(&bp->phy_lock);
4790
4791	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4792		/* Adjust the voltage regulator two steps lower.  The default
4793		 * value of this register is 0x0000000e. */
4794		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4795
4796		/* Remove bad rbuf memory from the free pool. */
4797		rc = bnx2_alloc_bad_rbuf(bp);
4798	}
4799
4800	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4801		bnx2_setup_msix_tbl(bp);
4802		/* Prevent MSIX table reads and writes from timing out */
4803		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4804			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4805	}
4806
4807	return rc;
4808}
4809
4810static int
4811bnx2_init_chip(struct bnx2 *bp)
4812{
4813	u32 val, mtu;
4814	int rc, i;
4815
4816	/* Make sure the interrupt is not active. */
4817	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4818
4819	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4820	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4821#ifdef __BIG_ENDIAN
4822	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4823#endif
4824	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4825	      DMA_READ_CHANS << 12 |
4826	      DMA_WRITE_CHANS << 16;
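	/* DMA_CONFIG packs several fields into one dword; assuming, for
	 * illustration, 5 read and 3 write channels, the two shifts above
	 * contribute (5 << 12) | (3 << 16) = 0x35000 on top of the
	 * byte/word-swap control bits.
	 */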
4827
4828	val |= (0x2 << 20) | (1 << 11);
4829
4830	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4831		val |= (1 << 23);
4832
4833	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4834	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4835		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4836
4837	REG_WR(bp, BNX2_DMA_CONFIG, val);
4838
4839	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4840		val = REG_RD(bp, BNX2_TDMA_CONFIG);
4841		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4842		REG_WR(bp, BNX2_TDMA_CONFIG, val);
4843	}
4844
4845	if (bp->flags & BNX2_FLAG_PCIX) {
4846		u16 val16;
4847
4848		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4849				     &val16);
4850		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4851				      val16 & ~PCI_X_CMD_ERO);
4852	}
4853
4854	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4855	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4856	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4857	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4858
4859	/* Initialize context mapping and zero out the quick contexts.  The
4860	 * context block must have already been enabled. */
4861	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4862		rc = bnx2_init_5709_context(bp);
4863		if (rc)
4864			return rc;
4865	} else
4866		bnx2_init_context(bp);
4867
4868	if ((rc = bnx2_init_cpus(bp)) != 0)
4869		return rc;
4870
4871	bnx2_init_nvram(bp);
4872
4873	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4874
4875	val = REG_RD(bp, BNX2_MQ_CONFIG);
4876	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4877	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4878	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4879		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4880		if (CHIP_REV(bp) == CHIP_REV_Ax)
4881			val |= BNX2_MQ_CONFIG_HALT_DIS;
4882	}
4883
4884	REG_WR(bp, BNX2_MQ_CONFIG, val);
4885
4886	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4887	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4888	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4889
4890	val = (BCM_PAGE_BITS - 8) << 24;
4891	REG_WR(bp, BNX2_RV2P_CONFIG, val);
4892
4893	/* Configure page size. */
4894	val = REG_RD(bp, BNX2_TBDR_CONFIG);
4895	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4896	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4897	REG_WR(bp, BNX2_TBDR_CONFIG, val);
4898
4899	val = bp->mac_addr[0] +
4900	      (bp->mac_addr[1] << 8) +
4901	      (bp->mac_addr[2] << 16) +
4902	      bp->mac_addr[3] +
4903	      (bp->mac_addr[4] << 8) +
4904	      (bp->mac_addr[5] << 16);
4905	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
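	/* The seed folds the MAC address into a small per-port value,
	 * presumably so ports sharing a wire back off differently.
	 * E.g. for the illustrative address 00:10:18:01:02:03:
	 *
	 *	(0x00 + 0x1000 + 0x180000) + (0x01 + 0x0200 + 0x030000)
	 *		= 0x001b1201
	 */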
4906
4907	/* Program the MTU.  Also include 4 bytes for CRC32. */
4908	mtu = bp->dev->mtu;
4909	val = mtu + ETH_HLEN + ETH_FCS_LEN;
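	/* For example, an mtu of 9000 gives val = 9000 + 14 + 4 = 9018;
	 * that exceeds MAX_ETHERNET_PACKET_SIZE + 4 (presumably a
	 * standard 1514-byte frame plus FCS), so the jumbo-enable bit is
	 * OR'ed into the same register value below.
	 */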
4910	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4911		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4912	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4913
4914	if (mtu < 1500)
4915		mtu = 1500;
4916
4917	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4918	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4919	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4920
4921	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4922	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4923		bp->bnx2_napi[i].last_status_idx = 0;
4924
4925	bp->idle_chk_status_idx = 0xffff;
4926
4927	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4928
4929	/* Set up how to generate a link change interrupt. */
4930	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4931
4932	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4933	       (u64) bp->status_blk_mapping & 0xffffffff);
4934	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4935
4936	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4937	       (u64) bp->stats_blk_mapping & 0xffffffff);
4938	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4939	       (u64) bp->stats_blk_mapping >> 32);
4940
4941	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4942	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4943
4944	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4945	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4946
4947	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4948	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4949
4950	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4951
4952	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4953
4954	REG_WR(bp, BNX2_HC_COM_TICKS,
4955	       (bp->com_ticks_int << 16) | bp->com_ticks);
4956
4957	REG_WR(bp, BNX2_HC_CMD_TICKS,
4958	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4959
4960	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4961		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4962	else
4963		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4964	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4965
4966	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4967		val = BNX2_HC_CONFIG_COLLECT_STATS;
4968	else {
4969		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4970		      BNX2_HC_CONFIG_COLLECT_STATS;
4971	}
4972
4973	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4974		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4975		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
4976
4977		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4978	}
4979
4980	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4981		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4982
4983	REG_WR(bp, BNX2_HC_CONFIG, val);
4984
4985	if (bp->rx_ticks < 25)
4986		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
4987	else
4988		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
4989
4990	for (i = 1; i < bp->irq_nvecs; i++) {
4991		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4992			   BNX2_HC_SB_CONFIG_1;
4993
4994		REG_WR(bp, base,
4995			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4996			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4997			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4998
4999		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5000			(bp->tx_quick_cons_trip_int << 16) |
5001			 bp->tx_quick_cons_trip);
5002
5003		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5004			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5005
5006		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5007		       (bp->rx_quick_cons_trip_int << 16) |
5008			bp->rx_quick_cons_trip);
5009
5010		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5011			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5012	}
5013
5014	/* Clear internal stats counters. */
5015	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5016
5017	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5018
5019	/* Initialize the receive filter. */
5020	bnx2_set_rx_mode(bp->dev);
5021
5022	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5023		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5024		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5025		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5026	}
5027	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5028			  1, 0);
5029
5030	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5031	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5032
5033	udelay(20);
5034
5035	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5036
5037	return rc;
5038}
5039
5040static void
5041bnx2_clear_ring_states(struct bnx2 *bp)
5042{
5043	struct bnx2_napi *bnapi;
5044	struct bnx2_tx_ring_info *txr;
5045	struct bnx2_rx_ring_info *rxr;
5046	int i;
5047
5048	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5049		bnapi = &bp->bnx2_napi[i];
5050		txr = &bnapi->tx_ring;
5051		rxr = &bnapi->rx_ring;
5052
5053		txr->tx_cons = 0;
5054		txr->hw_tx_cons = 0;
5055		rxr->rx_prod_bseq = 0;
5056		rxr->rx_prod = 0;
5057		rxr->rx_cons = 0;
5058		rxr->rx_pg_prod = 0;
5059		rxr->rx_pg_cons = 0;
5060	}
5061}
5062
5063static void
5064bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5065{
5066	u32 val, offset0, offset1, offset2, offset3;
5067	u32 cid_addr = GET_CID_ADDR(cid);
5068
5069	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5070		offset0 = BNX2_L2CTX_TYPE_XI;
5071		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5072		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5073		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5074	} else {
5075		offset0 = BNX2_L2CTX_TYPE;
5076		offset1 = BNX2_L2CTX_CMD_TYPE;
5077		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5078		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5079	}
5080	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5081	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5082
5083	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5084	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5085
5086	val = (u64) txr->tx_desc_mapping >> 32;
5087	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5088
5089	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5090	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5091}
5092
5093static void
5094bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5095{
5096	struct tx_bd *txbd;
5097	u32 cid = TX_CID;
5098	struct bnx2_napi *bnapi;
5099	struct bnx2_tx_ring_info *txr;
5100
5101	bnapi = &bp->bnx2_napi[ring_num];
5102	txr = &bnapi->tx_ring;
5103
5104	if (ring_num == 0)
5105		cid = TX_CID;
5106	else
5107		cid = TX_TSS_CID + ring_num - 1;
5108
5109	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5110
5111	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5112
5113	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5114	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5115
5116	txr->tx_prod = 0;
5117	txr->tx_prod_bseq = 0;
5118
5119	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5120	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5121
5122	bnx2_init_tx_context(bp, cid, txr);
5123}
5124
5125static void
5126bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5127		     int num_rings)
5128{
5129	int i;
5130	struct rx_bd *rxbd;
5131
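	/* Each ring page holds MAX_RX_DESC_CNT usable BDs plus one chain
	 * BD; the loop points that last BD at the next page's DMA address
	 * and the final page chains back to page 0, e.g. with
	 * num_rings = 2: page 0 -> page 1 -> page 0.
	 */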
5132	for (i = 0; i < num_rings; i++) {
5133		int j;
5134
5135		rxbd = &rx_ring[i][0];
5136		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5137			rxbd->rx_bd_len = buf_size;
5138			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5139		}
5140		if (i == (num_rings - 1))
5141			j = 0;
5142		else
5143			j = i + 1;
5144		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5145		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5146	}
5147}
5148
5149static void
5150bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5151{
5152	int i;
5153	u16 prod, ring_prod;
5154	u32 cid, rx_cid_addr, val;
5155	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5156	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5157
5158	if (ring_num == 0)
5159		cid = RX_CID;
5160	else
5161		cid = RX_RSS_CID + ring_num - 1;
5162
5163	rx_cid_addr = GET_CID_ADDR(cid);
5164
5165	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5166			     bp->rx_buf_use_size, bp->rx_max_ring);
5167
5168	bnx2_init_rx_context(bp, cid);
5169
5170	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5171		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5172		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5173	}
5174
5175	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5176	if (bp->rx_pg_ring_size) {
5177		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5178				     rxr->rx_pg_desc_mapping,
5179				     PAGE_SIZE, bp->rx_max_pg_ring);
5180		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5181		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5182		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5183		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5184
5185		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5186		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5187
5188		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5189		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5190
5191		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5192			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5193	}
5194
5195	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5196	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5197
5198	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5199	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5200
5201	ring_prod = prod = rxr->rx_pg_prod;
5202	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5203		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5204			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5205				    ring_num, i, bp->rx_pg_ring_size);
5206			break;
5207		}
5208		prod = NEXT_RX_BD(prod);
5209		ring_prod = RX_PG_RING_IDX(prod);
5210	}
5211	rxr->rx_pg_prod = prod;
5212
5213	ring_prod = prod = rxr->rx_prod;
5214	for (i = 0; i < bp->rx_ring_size; i++) {
5215		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5216			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5217				    ring_num, i, bp->rx_ring_size);
5218			break;
5219		}
5220		prod = NEXT_RX_BD(prod);
5221		ring_prod = RX_RING_IDX(prod);
5222	}
5223	rxr->rx_prod = prod;
5224
5225	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5226	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5227	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5228
5229	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5230	REG_WR16(bp, rxr->rx_bidx_addr, prod);
5231
5232	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5233}
5234
5235static void
5236bnx2_init_all_rings(struct bnx2 *bp)
5237{
5238	int i;
5239	u32 val;
5240
5241	bnx2_clear_ring_states(bp);
5242
5243	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5244	for (i = 0; i < bp->num_tx_rings; i++)
5245		bnx2_init_tx_ring(bp, i);
5246
5247	if (bp->num_tx_rings > 1)
5248		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5249		       (TX_TSS_CID << 7));
5250
5251	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5252	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5253
5254	for (i = 0; i < bp->num_rx_rings; i++)
5255		bnx2_init_rx_ring(bp, i);
5256
5257	if (bp->num_rx_rings > 1) {
5258		u32 tbl_32 = 0;
5259
5260		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5261			int shift = (i % 8) << 2;
5262
5263			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5264			if ((i % 8) == 7) {
5265				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5266				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5267					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5268					BNX2_RLUP_RSS_COMMAND_WRITE |
5269					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5270				tbl_32 = 0;
5271			}
5272		}
5273
5274		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5275		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5276
5277		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5278
5279	}
5280}
5281
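/* Round the page count implied by ring_size up to a power of 2, capped
 * at max_size.  Worked example, assuming MAX_RX_DESC_CNT is 255:
 * ring_size = 600 needs 3 pages; the bit-walk below stops at the
 * highest set bit of num_rings (2) and, since 3 != 2, doubles it to 4.
 */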
5282static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5283{
5284	u32 max, num_rings = 1;
5285
5286	while (ring_size > MAX_RX_DESC_CNT) {
5287		ring_size -= MAX_RX_DESC_CNT;
5288		num_rings++;
5289	}
5290	/* round to next power of 2 */
5291	max = max_size;
5292	while ((max & num_rings) == 0)
5293		max >>= 1;
5294
5295	if (num_rings != max)
5296		max <<= 1;
5297
5298	return max;
5299}
5300
5301static void
5302bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5303{
5304	u32 rx_size, rx_space, jumbo_size;
5305
5306	/* 8 for CRC and VLAN */
5307	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5308
5309	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5310		sizeof(struct skb_shared_info);
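	/* If one fully-provisioned skb (data + alignment + shared info)
	 * no longer fits in a single page, the packet is split into a
	 * small header buffer plus pages from the page ring set up below;
	 * the "mtu - 40" presumably reserves room for a typical TCP/IP
	 * header, e.g. a 9000-byte mtu on 4K pages needs
	 * PAGE_ALIGN(8960) >> PAGE_SHIFT = 3 pages per packet.
	 */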
5311
5312	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5313	bp->rx_pg_ring_size = 0;
5314	bp->rx_max_pg_ring = 0;
5315	bp->rx_max_pg_ring_idx = 0;
5316	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5317		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5318
5319		jumbo_size = size * pages;
5320		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5321			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5322
5323		bp->rx_pg_ring_size = jumbo_size;
5324		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5325							MAX_RX_PG_RINGS);
5326		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5327		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5328		bp->rx_copy_thresh = 0;
5329	}
5330
5331	bp->rx_buf_use_size = rx_size;
5332	/* hw alignment */
5333	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5334	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5335	bp->rx_ring_size = size;
5336	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5337	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5338}
5339
5340static void
5341bnx2_free_tx_skbs(struct bnx2 *bp)
5342{
5343	int i;
5344
5345	for (i = 0; i < bp->num_tx_rings; i++) {
5346		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5347		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5348		int j;
5349
5350		if (txr->tx_buf_ring == NULL)
5351			continue;
5352
5353		for (j = 0; j < TX_DESC_CNT; ) {
5354			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5355			struct sk_buff *skb = tx_buf->skb;
5356			int k, last;
5357
5358			if (skb == NULL) {
5359				j++;
5360				continue;
5361			}
5362
5363			dma_unmap_single(&bp->pdev->dev,
5364					 dma_unmap_addr(tx_buf, mapping),
5365					 skb_headlen(skb),
5366					 PCI_DMA_TODEVICE);
5367
5368			tx_buf->skb = NULL;
5369
5370			last = tx_buf->nr_frags;
5371			j++;
5372			for (k = 0; k < last; k++, j++) {
5373				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5374				dma_unmap_page(&bp->pdev->dev,
5375					dma_unmap_addr(tx_buf, mapping),
5376					skb_shinfo(skb)->frags[k].size,
5377					PCI_DMA_TODEVICE);
5378			}
5379			dev_kfree_skb(skb);
5380		}
5381	}
5382}
5383
5384static void
5385bnx2_free_rx_skbs(struct bnx2 *bp)
5386{
5387	int i;
5388
5389	for (i = 0; i < bp->num_rx_rings; i++) {
5390		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5391		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5392		int j;
5393
5394		if (rxr->rx_buf_ring == NULL)
5395			return;
5396
5397		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5398			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5399			struct sk_buff *skb = rx_buf->skb;
5400
5401			if (skb == NULL)
5402				continue;
5403
5404			dma_unmap_single(&bp->pdev->dev,
5405					 dma_unmap_addr(rx_buf, mapping),
5406					 bp->rx_buf_use_size,
5407					 PCI_DMA_FROMDEVICE);
5408
5409			rx_buf->skb = NULL;
5410
5411			dev_kfree_skb(skb);
5412		}
5413		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5414			bnx2_free_rx_page(bp, rxr, j);
5415	}
5416}
5417
5418static void
5419bnx2_free_skbs(struct bnx2 *bp)
5420{
5421	bnx2_free_tx_skbs(bp);
5422	bnx2_free_rx_skbs(bp);
5423}
5424
5425static int
5426bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5427{
5428	int rc;
5429
5430	rc = bnx2_reset_chip(bp, reset_code);
5431	bnx2_free_skbs(bp);
5432	if (rc)
5433		return rc;
5434
5435	if ((rc = bnx2_init_chip(bp)) != 0)
5436		return rc;
5437
5438	bnx2_init_all_rings(bp);
5439	return 0;
5440}
5441
5442static int
5443bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5444{
5445	int rc;
5446
5447	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5448		return rc;
5449
5450	spin_lock_bh(&bp->phy_lock);
5451	bnx2_init_phy(bp, reset_phy);
5452	bnx2_set_link(bp);
5453	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5454		bnx2_remote_phy_event(bp);
5455	spin_unlock_bh(&bp->phy_lock);
5456	return 0;
5457}
5458
5459static int
5460bnx2_shutdown_chip(struct bnx2 *bp)
5461{
5462	u32 reset_code;
5463
5464	if (bp->flags & BNX2_FLAG_NO_WOL)
5465		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5466	else if (bp->wol)
5467		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5468	else
5469		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5470
5471	return bnx2_reset_chip(bp, reset_code);
5472}
5473
5474static int
5475bnx2_test_registers(struct bnx2 *bp)
5476{
5477	int ret;
5478	int i, is_5709;
5479	static const struct {
5480		u16   offset;
5481		u16   flags;
5482#define BNX2_FL_NOT_5709	1
5483		u32   rw_mask;
5484		u32   ro_mask;
5485	} reg_tbl[] = {
5486		{ 0x006c, 0, 0x00000000, 0x0000003f },
5487		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5488		{ 0x0094, 0, 0x00000000, 0x00000000 },
5489
5490		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5491		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5492		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5493		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5494		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5495		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5496		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5497		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5498		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5499
5500		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5501		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5502		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5503		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5504		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5505		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5506
5507		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5508		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5509		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5510
5511		{ 0x1000, 0, 0x00000000, 0x00000001 },
5512		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5513
5514		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5515		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5516		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5517		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5518		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5519		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5520		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5521		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5522		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5523		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5524
5525		{ 0x1800, 0, 0x00000000, 0x00000001 },
5526		{ 0x1804, 0, 0x00000000, 0x00000003 },
5527
5528		{ 0x2800, 0, 0x00000000, 0x00000001 },
5529		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5530		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5531		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5532		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5533		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5534		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5535		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5536		{ 0x2840, 0, 0x00000000, 0xffffffff },
5537		{ 0x2844, 0, 0x00000000, 0xffffffff },
5538		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5539		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5540
5541		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5542		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5543
5544		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5545		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5546		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5547		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5548		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5549		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5550		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5551		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5552		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5553
5554		{ 0x5004, 0, 0x00000000, 0x0000007f },
5555		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5556
5557		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5558		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5559		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5560		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5561		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5562		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5563		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5564		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5565		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5566
5567		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5568		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5569		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5570		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5571		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5572		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5573		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5574		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5575		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5576		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5577		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5578		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5579		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5580		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5581		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5582		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5583		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5584		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5585		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5586		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5587		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5588		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5589		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5590
5591		{ 0xffff, 0, 0x00000000, 0x00000000 },
5592	};
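	/* The loop below probes each entry twice: after writing 0 every
	 * rw_mask bit must read back clear, after writing 0xffffffff
	 * every rw_mask bit must read back set, and in both cases the
	 * ro_mask bits must still hold the value saved beforehand.
	 */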
5593
5594	ret = 0;
5595	is_5709 = 0;
5596	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5597		is_5709 = 1;
5598
5599	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5600		u32 offset, rw_mask, ro_mask, save_val, val;
5601		u16 flags = reg_tbl[i].flags;
5602
5603		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5604			continue;
5605
5606		offset = (u32) reg_tbl[i].offset;
5607		rw_mask = reg_tbl[i].rw_mask;
5608		ro_mask = reg_tbl[i].ro_mask;
5609
5610		save_val = readl(bp->regview + offset);
5611
5612		writel(0, bp->regview + offset);
5613
5614		val = readl(bp->regview + offset);
5615		if ((val & rw_mask) != 0) {
5616			goto reg_test_err;
5617		}
5618
5619		if ((val & ro_mask) != (save_val & ro_mask)) {
5620			goto reg_test_err;
5621		}
5622
5623		writel(0xffffffff, bp->regview + offset);
5624
5625		val = readl(bp->regview + offset);
5626		if ((val & rw_mask) != rw_mask) {
5627			goto reg_test_err;
5628		}
5629
5630		if ((val & ro_mask) != (save_val & ro_mask)) {
5631			goto reg_test_err;
5632		}
5633
5634		writel(save_val, bp->regview + offset);
5635		continue;
5636
5637reg_test_err:
5638		writel(save_val, bp->regview + offset);
5639		ret = -ENODEV;
5640		break;
5641	}
5642	return ret;
5643}
5644
5645static int
5646bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5647{
5648	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5649		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5650	int i;
5651
5652	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5653		u32 offset;
5654
5655		for (offset = 0; offset < size; offset += 4) {
5656
5657			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5658
5659			if (bnx2_reg_rd_ind(bp, start + offset) !=
5660				test_pattern[i]) {
5661				return -ENODEV;
5662			}
5663		}
5664	}
5665	return 0;
5666}
5667
5668static int
5669bnx2_test_memory(struct bnx2 *bp)
5670{
5671	int ret = 0;
5672	int i;
5673	static struct mem_entry {
5674		u32   offset;
5675		u32   len;
5676	} mem_tbl_5706[] = {
5677		{ 0x60000,  0x4000 },
5678		{ 0xa0000,  0x3000 },
5679		{ 0xe0000,  0x4000 },
5680		{ 0x120000, 0x4000 },
5681		{ 0x1a0000, 0x4000 },
5682		{ 0x160000, 0x4000 },
5683		{ 0xffffffff, 0    },
5684	},
5685	mem_tbl_5709[] = {
5686		{ 0x60000,  0x4000 },
5687		{ 0xa0000,  0x3000 },
5688		{ 0xe0000,  0x4000 },
5689		{ 0x120000, 0x4000 },
5690		{ 0x1a0000, 0x4000 },
5691		{ 0xffffffff, 0    },
5692	};
5693	struct mem_entry *mem_tbl;
5694
5695	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5696		mem_tbl = mem_tbl_5709;
5697	else
5698		mem_tbl = mem_tbl_5706;
5699
5700	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5701		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5702			mem_tbl[i].len)) != 0) {
5703			return ret;
5704		}
5705	}
5706
5707	return ret;
5708}
5709
5710#define BNX2_MAC_LOOPBACK	0
5711#define BNX2_PHY_LOOPBACK	1
5712
5713static int
5714bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5715{
5716	unsigned int pkt_size, num_pkts, i;
5717	struct sk_buff *skb, *rx_skb;
5718	unsigned char *packet;
5719	u16 rx_start_idx, rx_idx;
5720	dma_addr_t map;
5721	struct tx_bd *txbd;
5722	struct sw_bd *rx_buf;
5723	struct l2_fhdr *rx_hdr;
5724	int ret = -ENODEV;
5725	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5726	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5727	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5728
5729	tx_napi = bnapi;
5730
5731	txr = &tx_napi->tx_ring;
5732	rxr = &bnapi->rx_ring;
5733	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5734		bp->loopback = MAC_LOOPBACK;
5735		bnx2_set_mac_loopback(bp);
5736	}
5737	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5738		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5739			return 0;
5740
5741		bp->loopback = PHY_LOOPBACK;
5742		bnx2_set_phy_loopback(bp);
5743	}
5744	else
5745		return -EINVAL;
5746
5747	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5748	skb = netdev_alloc_skb(bp->dev, pkt_size);
5749	if (!skb)
5750		return -ENOMEM;
5751	packet = skb_put(skb, pkt_size);
5752	memcpy(packet, bp->dev->dev_addr, 6);
5753	memset(packet + 6, 0x0, 8);
5754	for (i = 14; i < pkt_size; i++)
5755		packet[i] = (unsigned char) (i & 0xff);
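	/* The frame built above: 6 bytes of our own MAC as destination
	 * (so MAC/PHY loopback returns it to us), 8 zero bytes covering
	 * the source address and type field, then an (i & 0xff) pattern
	 * that the received data is checked against below.
	 */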
5756
5757	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5758			     PCI_DMA_TODEVICE);
5759	if (dma_mapping_error(&bp->pdev->dev, map)) {
5760		dev_kfree_skb(skb);
5761		return -EIO;
5762	}
5763
5764	REG_WR(bp, BNX2_HC_COMMAND,
5765	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5766
5767	REG_RD(bp, BNX2_HC_COMMAND);
5768
5769	udelay(5);
5770	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5771
5772	num_pkts = 0;
5773
5774	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5775
5776	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5777	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5778	txbd->tx_bd_mss_nbytes = pkt_size;
5779	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5780
5781	num_pkts++;
5782	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5783	txr->tx_prod_bseq += pkt_size;
5784
5785	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5786	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5787
5788	udelay(100);
5789
5790	REG_WR(bp, BNX2_HC_COMMAND,
5791	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5792
5793	REG_RD(bp, BNX2_HC_COMMAND);
5794
5795	udelay(5);
5796
5797	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5798	dev_kfree_skb(skb);
5799
5800	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5801		goto loopback_test_done;
5802
5803	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5804	if (rx_idx != rx_start_idx + num_pkts) {
5805		goto loopback_test_done;
5806	}
5807
5808	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5809	rx_skb = rx_buf->skb;
5810
5811	rx_hdr = rx_buf->desc;
5812	skb_reserve(rx_skb, BNX2_RX_OFFSET);
5813
5814	dma_sync_single_for_cpu(&bp->pdev->dev,
5815		dma_unmap_addr(rx_buf, mapping),
5816		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5817
5818	if (rx_hdr->l2_fhdr_status &
5819		(L2_FHDR_ERRORS_BAD_CRC |
5820		L2_FHDR_ERRORS_PHY_DECODE |
5821		L2_FHDR_ERRORS_ALIGNMENT |
5822		L2_FHDR_ERRORS_TOO_SHORT |
5823		L2_FHDR_ERRORS_GIANT_FRAME)) {
5824
5825		goto loopback_test_done;
5826	}
5827
5828	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5829		goto loopback_test_done;
5830	}
5831
5832	for (i = 14; i < pkt_size; i++) {
5833		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5834			goto loopback_test_done;
5835		}
5836	}
5837
5838	ret = 0;
5839
5840loopback_test_done:
5841	bp->loopback = 0;
5842	return ret;
5843}
5844
5845#define BNX2_MAC_LOOPBACK_FAILED	1
5846#define BNX2_PHY_LOOPBACK_FAILED	2
5847#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5848					 BNX2_PHY_LOOPBACK_FAILED)
5849
5850static int
5851bnx2_test_loopback(struct bnx2 *bp)
5852{
5853	int rc = 0;
5854
5855	if (!netif_running(bp->dev))
5856		return BNX2_LOOPBACK_FAILED;
5857
5858	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5859	spin_lock_bh(&bp->phy_lock);
5860	bnx2_init_phy(bp, 1);
5861	spin_unlock_bh(&bp->phy_lock);
5862	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5863		rc |= BNX2_MAC_LOOPBACK_FAILED;
5864	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5865		rc |= BNX2_PHY_LOOPBACK_FAILED;
5866	return rc;
5867}
5868
5869#define NVRAM_SIZE 0x200
5870#define CRC32_RESIDUAL 0xdebb20e3
5871
5872static int
5873bnx2_test_nvram(struct bnx2 *bp)
5874{
5875	__be32 buf[NVRAM_SIZE / 4];
5876	u8 *data = (u8 *) buf;
5877	int rc = 0;
5878	u32 magic, csum;
5879
5880	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5881		goto test_nvram_done;
5882
5883	magic = be32_to_cpu(buf[0]);
5884	if (magic != 0x669955aa) {
5885		rc = -ENODEV;
5886		goto test_nvram_done;
5887	}
5888
5889	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5890		goto test_nvram_done;
5891
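	/* Each 0x100-byte NVRAM block ends with its own CRC32; running
	 * ether_crc_le() over a whole block, checksum included, therefore
	 * yields the constant CRC-32 residual 0xdebb20e3 when intact.
	 */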
5892	csum = ether_crc_le(0x100, data);
5893	if (csum != CRC32_RESIDUAL) {
5894		rc = -ENODEV;
5895		goto test_nvram_done;
5896	}
5897
5898	csum = ether_crc_le(0x100, data + 0x100);
5899	if (csum != CRC32_RESIDUAL) {
5900		rc = -ENODEV;
5901	}
5902
5903test_nvram_done:
5904	return rc;
5905}
5906
5907static int
5908bnx2_test_link(struct bnx2 *bp)
5909{
5910	u32 bmsr;
5911
5912	if (!netif_running(bp->dev))
5913		return -ENODEV;
5914
5915	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5916		if (bp->link_up)
5917			return 0;
5918		return -ENODEV;
5919	}
5920	spin_lock_bh(&bp->phy_lock);
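	/* The BMSR link-status bit is latched low per IEEE 802.3, so the
	 * register is read twice; the second read reflects the current
	 * state.
	 */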
5921	bnx2_enable_bmsr1(bp);
5922	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5923	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5924	bnx2_disable_bmsr1(bp);
5925	spin_unlock_bh(&bp->phy_lock);
5926
5927	if (bmsr & BMSR_LSTATUS) {
5928		return 0;
5929	}
5930	return -ENODEV;
5931}
5932
5933static int
5934bnx2_test_intr(struct bnx2 *bp)
5935{
5936	int i;
5937	u16 status_idx;
5938
5939	if (!netif_running(bp->dev))
5940		return -ENODEV;
5941
5942	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5943
5944	/* This register is not touched during run-time. */
5945	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5946	REG_RD(bp, BNX2_HC_COMMAND);
5947
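	/* Forcing coalescing should fire an interrupt and advance the
	 * status index within ~100 ms if delivery works; poll for the
	 * change.
	 */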
5948	for (i = 0; i < 10; i++) {
5949		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5950			status_idx) {
5951
5952			break;
5953		}
5954
5955		msleep_interruptible(10);
5956	}
5957	if (i < 10)
5958		return 0;
5959
5960	return -ENODEV;
5961}
5962
5963/* Determine link status for parallel detection. */
5964static int
5965bnx2_5706_serdes_has_link(struct bnx2 *bp)
5966{
5967	u32 mode_ctl, an_dbg, exp;
5968
5969	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5970		return 0;
5971
5972	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5973	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5974
5975	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5976		return 0;
5977
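	/* The AN debug status is read twice below, presumably because the
	 * NOSYNC/RUDI bits are latched and the first read returns stale
	 * state.
	 */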
5978	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5979	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5980	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5981
5982	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5983		return 0;
5984
5985	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5986	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5987	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5988
5989	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
5990		return 0;
5991
5992	return 1;
5993}
5994
5995static void
5996bnx2_5706_serdes_timer(struct bnx2 *bp)
5997{
5998	int check_link = 1;
5999
6000	spin_lock(&bp->phy_lock);
6001	if (bp->serdes_an_pending) {
6002		bp->serdes_an_pending--;
6003		check_link = 0;
6004	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6005		u32 bmcr;
6006
6007		bp->current_interval = BNX2_TIMER_INTERVAL;
6008
6009		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6010
6011		if (bmcr & BMCR_ANENABLE) {
6012			if (bnx2_5706_serdes_has_link(bp)) {
6013				bmcr &= ~BMCR_ANENABLE;
6014				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6015				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6016				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6017			}
6018		}
6019	}
6020	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6021		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6022		u32 phy2;
6023
6024		bnx2_write_phy(bp, 0x17, 0x0f01);
6025		bnx2_read_phy(bp, 0x15, &phy2);
6026		if (phy2 & 0x20) {
6027			u32 bmcr;
6028
6029			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6030			bmcr |= BMCR_ANENABLE;
6031			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6032
6033			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6034		}
6035	} else
6036		bp->current_interval = BNX2_TIMER_INTERVAL;
6037
6038	if (check_link) {
6039		u32 val;
6040
6041		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6042		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6043		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6044
6045		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6046			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6047				bnx2_5706s_force_link_dn(bp, 1);
6048				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6049			} else
6050				bnx2_set_link(bp);
6051		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6052			bnx2_set_link(bp);
6053	}
6054	spin_unlock(&bp->phy_lock);
6055}
6056
6057static void
6058bnx2_5708_serdes_timer(struct bnx2 *bp)
6059{
6060	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6061		return;
6062
6063	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6064		bp->serdes_an_pending = 0;
6065		return;
6066	}
6067
6068	spin_lock(&bp->phy_lock);
6069	if (bp->serdes_an_pending)
6070		bp->serdes_an_pending--;
6071	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6072		u32 bmcr;
6073
6074		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6075		if (bmcr & BMCR_ANENABLE) {
6076			bnx2_enable_forced_2g5(bp);
6077			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6078		} else {
6079			bnx2_disable_forced_2g5(bp);
6080			bp->serdes_an_pending = 2;
6081			bp->current_interval = BNX2_TIMER_INTERVAL;
6082		}
6083
6084	} else
6085		bp->current_interval = BNX2_TIMER_INTERVAL;
6086
6087	spin_unlock(&bp->phy_lock);
6088}
6089
6090static void
6091bnx2_timer(unsigned long data)
6092{
6093	struct bnx2 *bp = (struct bnx2 *) data;
6094
6095	if (!netif_running(bp->dev))
6096		return;
6097
6098	if (atomic_read(&bp->intr_sem) != 0)
6099		goto bnx2_restart_timer;
6100
6101	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6102	     BNX2_FLAG_USING_MSI)
6103		bnx2_chk_missed_msi(bp);
6104
6105	bnx2_send_heart_beat(bp);
6106
6107	bp->stats_blk->stat_FwRxDrop =
6108		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6109
6110	/* Work around occasionally corrupted statistics counters. */
6111	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6112		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6113					    BNX2_HC_COMMAND_STATS_NOW);
6114
6115	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6116		if (CHIP_NUM(bp) == CHIP_NUM_5706)
6117			bnx2_5706_serdes_timer(bp);
6118		else
6119			bnx2_5708_serdes_timer(bp);
6120	}
6121
6122bnx2_restart_timer:
6123	mod_timer(&bp->timer, jiffies + bp->current_interval);
6124}
6125
6126static int
6127bnx2_request_irq(struct bnx2 *bp)
6128{
6129	unsigned long flags;
6130	struct bnx2_irq *irq;
6131	int rc = 0, i;
6132
6133	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6134		flags = 0;
6135	else
6136		flags = IRQF_SHARED;
6137
6138	for (i = 0; i < bp->irq_nvecs; i++) {
6139		irq = &bp->irq_tbl[i];
6140		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6141				 &bp->bnx2_napi[i]);
6142		if (rc)
6143			break;
6144		irq->requested = 1;
6145	}
6146	return rc;
6147}
6148
6149static void
6150__bnx2_free_irq(struct bnx2 *bp)
6151{
6152	struct bnx2_irq *irq;
6153	int i;
6154
6155	for (i = 0; i < bp->irq_nvecs; i++) {
6156		irq = &bp->irq_tbl[i];
6157		if (irq->requested)
6158			free_irq(irq->vector, &bp->bnx2_napi[i]);
6159		irq->requested = 0;
6160	}
6161}
6162
6163static void
6164bnx2_free_irq(struct bnx2 *bp)
6165{
6166
6167	__bnx2_free_irq(bp);
6168	if (bp->flags & BNX2_FLAG_USING_MSI)
6169		pci_disable_msi(bp->pdev);
6170	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6171		pci_disable_msix(bp->pdev);
6172
6173	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6174}
6175
6176static void
6177bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6178{
6179	int i, total_vecs, rc;
6180	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6181	struct net_device *dev = bp->dev;
6182	const int len = sizeof(bp->irq_tbl[0].name);
6183
6184	bnx2_setup_msix_tbl(bp);
6185	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6186	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6187	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6188
6189	/* Need to flush the previous three writes to ensure MSI-X
6190	 * is set up properly. */
6191	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6192
6193	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6194		msix_ent[i].entry = i;
6195		msix_ent[i].vector = 0;
6196	}
6197
6198	total_vecs = msix_vecs;
6199#ifdef BCM_CNIC
6200	total_vecs++;
6201#endif
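	/* When BCM_CNIC is configured, one extra vector is reserved above
	 * for the cnic (iSCSI offload) companion driver; the loop below
	 * negotiates the request down to what the PCI core can grant.
	 */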
6202	rc = -ENOSPC;
6203	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6204		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6205		if (rc <= 0)
6206			break;
6207		/* Partial allocation: retry with the granted vector count. */
6208		total_vecs = rc;
6209	}
6210
6211	if (rc != 0)
6212		return;
6213
6214	msix_vecs = total_vecs;
6215#ifdef BCM_CNIC
6216	msix_vecs--;
6217#endif
6218	bp->irq_nvecs = msix_vecs;
6219	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6220	for (i = 0; i < total_vecs; i++) {
6221		bp->irq_tbl[i].vector = msix_ent[i].vector;
6222		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6223		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6224	}
6225}
6226
6227static int
6228bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6229{
6230	int cpus = num_online_cpus();
6231	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6232
6233	bp->irq_tbl[0].handler = bnx2_interrupt;
6234	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6235	bp->irq_nvecs = 1;
6236	bp->irq_tbl[0].vector = bp->pdev->irq;
6237
6238	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6239		bnx2_enable_msix(bp, msix_vecs);
6240
6241	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6242	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6243		if (pci_enable_msi(bp->pdev) == 0) {
6244			bp->flags |= BNX2_FLAG_USING_MSI;
6245			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6246				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6247				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6248			} else
6249				bp->irq_tbl[0].handler = bnx2_msi;
6250
6251			bp->irq_tbl[0].vector = bp->pdev->irq;
6252		}
6253	}
6254
6255	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6256	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6257
6258	bp->num_rx_rings = bp->irq_nvecs;
6259	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6260}
6261
6262/* Called with rtnl_lock */
6263static int
6264bnx2_open(struct net_device *dev)
6265{
6266	struct bnx2 *bp = netdev_priv(dev);
6267	int rc;
6268
6269	netif_carrier_off(dev);
6270
6271	bnx2_set_power_state(bp, PCI_D0);
6272	bnx2_disable_int(bp);
6273
6274	rc = bnx2_setup_int_mode(bp, disable_msi);
6275	if (rc)
6276		goto open_err;
6277	bnx2_init_napi(bp);
6278	bnx2_napi_enable(bp);
6279	rc = bnx2_alloc_mem(bp);
6280	if (rc)
6281		goto open_err;
6282
6283	rc = bnx2_request_irq(bp);
6284	if (rc)
6285		goto open_err;
6286
6287	rc = bnx2_init_nic(bp, 1);
6288	if (rc)
6289		goto open_err;
6290
6291	mod_timer(&bp->timer, jiffies + bp->current_interval);
6292
6293	atomic_set(&bp->intr_sem, 0);
6294
6295	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6296
6297	bnx2_enable_int(bp);
6298
6299	if (bp->flags & BNX2_FLAG_USING_MSI) {
6300		/* Test MSI to make sure it is working.
6301		 * If the MSI test fails, fall back to INTx mode.
6302		 */
6303		if (bnx2_test_intr(bp) != 0) {
6304			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6305
6306			bnx2_disable_int(bp);
6307			bnx2_free_irq(bp);
6308
6309			bnx2_setup_int_mode(bp, 1);
6310
6311			rc = bnx2_init_nic(bp, 0);
6312
6313			if (!rc)
6314				rc = bnx2_request_irq(bp);
6315
6316			if (rc) {
6317				del_timer_sync(&bp->timer);
6318				goto open_err;
6319			}
6320			bnx2_enable_int(bp);
6321		}
6322	}
6323	if (bp->flags & BNX2_FLAG_USING_MSI)
6324		netdev_info(dev, "using MSI\n");
6325	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6326		netdev_info(dev, "using MSIX\n");
6327
6328	netif_tx_start_all_queues(dev);
6329
6330	return 0;
6331
6332open_err:
6333	bnx2_napi_disable(bp);
6334	bnx2_free_skbs(bp);
6335	bnx2_free_irq(bp);
6336	bnx2_free_mem(bp);
6337	bnx2_del_napi(bp);
6338	return rc;
6339}
6340
6341static void
6342bnx2_reset_task(struct work_struct *work)
6343{
6344	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6345	int rc;
6346
6347	rtnl_lock();
6348	if (!netif_running(bp->dev)) {
6349		rtnl_unlock();
6350		return;
6351	}
6352
6353	bnx2_netif_stop(bp, true);
6354
6355	rc = bnx2_init_nic(bp, 1);
6356	if (rc) {
6357		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6358		bnx2_napi_enable(bp);
6359		dev_close(bp->dev);
6360		rtnl_unlock();
6361		return;
6362	}
6363
6364	atomic_set(&bp->intr_sem, 1);
6365	bnx2_netif_start(bp, true);
6366	rtnl_unlock();
6367}
6368
6369static void
6370bnx2_dump_state(struct bnx2 *bp)
6371{
6372	struct net_device *dev = bp->dev;
6373	u32 val1, val2;
6374
6375	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6376	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6377		   atomic_read(&bp->intr_sem), val1);
6378	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6379	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6380	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6381	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6382		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
6383		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
6384	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6385		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6386	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6387		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6388	if (bp->flags & BNX2_FLAG_USING_MSIX)
6389		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6390			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6391}
6392
6393static void
6394bnx2_tx_timeout(struct net_device *dev)
6395{
6396	struct bnx2 *bp = netdev_priv(dev);
6397
6398	bnx2_dump_state(bp);
6399	bnx2_dump_mcp_state(bp);
6400
6401	/* This allows the netif to be shut down gracefully before resetting. */
6402	schedule_work(&bp->reset_task);
6403}
6404
6405/* Called with netif_tx_lock.
6406 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6407 * netif_wake_queue().
6408 */
6409static netdev_tx_t
6410bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6411{
6412	struct bnx2 *bp = netdev_priv(dev);
6413	dma_addr_t mapping;
6414	struct tx_bd *txbd;
6415	struct sw_tx_bd *tx_buf;
6416	u32 len, vlan_tag_flags, last_frag, mss;
6417	u16 prod, ring_prod;
6418	int i;
6419	struct bnx2_napi *bnapi;
6420	struct bnx2_tx_ring_info *txr;
6421	struct netdev_queue *txq;
6422
6423	/* Determine which TX ring this skb will be placed on. */
6424	i = skb_get_queue_mapping(skb);
6425	bnapi = &bp->bnx2_napi[i];
6426	txr = &bnapi->tx_ring;
6427	txq = netdev_get_tx_queue(dev, i);
6428
6429	if (unlikely(bnx2_tx_avail(bp, txr) <
6430	    (skb_shinfo(skb)->nr_frags + 1))) {
6431		netif_tx_stop_queue(txq);
6432		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6433
6434		return NETDEV_TX_BUSY;
6435	}
6436	len = skb_headlen(skb);
6437	prod = txr->tx_prod;
6438	ring_prod = TX_RING_IDX(prod);
6439
6440	vlan_tag_flags = 0;
6441	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6442		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6443	}
6444
6445	if (vlan_tx_tag_present(skb)) {
6446		vlan_tag_flags |=
6447			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6448	}
6449
6450	if ((mss = skb_shinfo(skb)->gso_size)) {
6451		u32 tcp_opt_len;
6452		struct iphdr *iph;
6453
6454		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6455
6456		tcp_opt_len = tcp_optlen(skb);
6457
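		/* For TSO over IPv6, a nonzero TCP header offset (in 8-byte
		 * units after the shift below) does not fit in a single
		 * descriptor field, so its bits are scattered across the
		 * OFF0/OFF4 flag fields and the OFF2 bits folded into the
		 * MSS word.
		 */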
6458		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6459			u32 tcp_off = skb_transport_offset(skb) -
6460				      sizeof(struct ipv6hdr) - ETH_HLEN;
6461
6462			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6463					  TX_BD_FLAGS_SW_FLAGS;
6464			if (likely(tcp_off == 0))
6465				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6466			else {
6467				tcp_off >>= 3;
6468				vlan_tag_flags |= ((tcp_off & 0x3) <<
6469						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6470						  ((tcp_off & 0x10) <<
6471						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6472				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6473			}
6474		} else {
6475			iph = ip_hdr(skb);
6476			if (tcp_opt_len || (iph->ihl > 5)) {
6477				vlan_tag_flags |= ((iph->ihl - 5) +
6478						   (tcp_opt_len >> 2)) << 8;
6479			}
6480		}
6481	} else
6482		mss = 0;
6483
6484	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6485	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6486		dev_kfree_skb(skb);
6487		return NETDEV_TX_OK;
6488	}
6489
6490	tx_buf = &txr->tx_buf_ring[ring_prod];
6491	tx_buf->skb = skb;
6492	dma_unmap_addr_set(tx_buf, mapping, mapping);
6493
6494	txbd = &txr->tx_desc_ring[ring_prod];
6495
6496	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6497	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6498	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6499	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6500
6501	last_frag = skb_shinfo(skb)->nr_frags;
6502	tx_buf->nr_frags = last_frag;
6503	tx_buf->is_gso = skb_is_gso(skb);
6504
6505	for (i = 0; i < last_frag; i++) {
6506		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6507
6508		prod = NEXT_TX_BD(prod);
6509		ring_prod = TX_RING_IDX(prod);
6510		txbd = &txr->tx_desc_ring[ring_prod];
6511
6512		len = frag->size;
6513		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6514				       len, DMA_TO_DEVICE);
6515		if (dma_mapping_error(&bp->pdev->dev, mapping))
6516			goto dma_error;
6517		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6518				   mapping);
6519
6520		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6521		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6522		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6523		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6524
6525	}
6526	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6527
6528	prod = NEXT_TX_BD(prod);
6529	txr->tx_prod_bseq += skb->len;
6530
6531	REG_WR16(bp, txr->tx_bidx_addr, prod);
6532	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6533
6534	mmiowb();
6535
6536	txr->tx_prod = prod;
6537
6538	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6539		netif_tx_stop_queue(txq);
6540
6541		/* netif_tx_stop_queue() must be done before checking
6542		 * tx index in bnx2_tx_avail() below, because in
6543		 * bnx2_tx_int(), we update tx index before checking for
6544		 * netif_tx_queue_stopped().
6545		 */
6546		smp_mb();
6547		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6548			netif_tx_wake_queue(txq);
6549	}
6550
6551	return NETDEV_TX_OK;
6552dma_error:
6553	/* save value of frag that failed */
6554	last_frag = i;
6555
6556	/* start back at beginning and unmap skb */
6557	prod = txr->tx_prod;
6558	ring_prod = TX_RING_IDX(prod);
6559	tx_buf = &txr->tx_buf_ring[ring_prod];
6560	tx_buf->skb = NULL;
6561	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6562			 skb_headlen(skb), DMA_TO_DEVICE);
6563
6564	/* unmap remaining mapped pages */
6565	for (i = 0; i < last_frag; i++) {
6566		prod = NEXT_TX_BD(prod);
6567		ring_prod = TX_RING_IDX(prod);
6568		tx_buf = &txr->tx_buf_ring[ring_prod];
6569		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6570			       skb_shinfo(skb)->frags[i].size,
6571			       DMA_TO_DEVICE);
6572	}
6573
6574	dev_kfree_skb(skb);
6575	return NETDEV_TX_OK;
6576}
6577
6578/* Called with rtnl_lock */
6579static int
6580bnx2_close(struct net_device *dev)
6581{
6582	struct bnx2 *bp = netdev_priv(dev);
6583
6584	bnx2_disable_int_sync(bp);
6585	bnx2_napi_disable(bp);
6586	del_timer_sync(&bp->timer);
6587	bnx2_shutdown_chip(bp);
6588	bnx2_free_irq(bp);
6589	bnx2_free_skbs(bp);
6590	bnx2_free_mem(bp);
6591	bnx2_del_napi(bp);
6592	bp->link_up = 0;
6593	netif_carrier_off(bp->dev);
6594	bnx2_set_power_state(bp, PCI_D3hot);
6595	return 0;
6596}
6597
6598static void
6599bnx2_save_stats(struct bnx2 *bp)
6600{
6601	u32 *hw_stats = (u32 *) bp->stats_blk;
6602	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6603	int i;
6604
6605	/* The 1st 10 counters are 64-bit counters */
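	/* Each one is a hi/lo pair of 32-bit words.  The lo words are summed
	 * in a u64 so an overflow past 0xffffffff can be detected and
	 * carried into the hi word (e.g. 0xffffffff + 2 carries, leaving
	 * lo = 1).
	 */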
6606	for (i = 0; i < 20; i += 2) {
6607		u32 hi;
6608		u64 lo;
6609
6610		hi = temp_stats[i] + hw_stats[i];
6611		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6612		if (lo > 0xffffffff)
6613			hi++;
6614		temp_stats[i] = hi;
6615		temp_stats[i + 1] = lo & 0xffffffff;
6616	}
6617
6618	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6619		temp_stats[i] += hw_stats[i];
6620}
6621
6622#define GET_64BIT_NET_STATS64(ctr)		\
6623	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6624
6625#define GET_64BIT_NET_STATS(ctr)				\
6626	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6627	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6628
6629#define GET_32BIT_NET_STATS(ctr)				\
6630	(unsigned long) (bp->stats_blk->ctr +			\
6631			 bp->temp_stats_blk->ctr)
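/* Helpers for bnx2_get_stats64(): 64-bit MIB counters are laid out as
 * _hi/_lo word pairs, so GET_64BIT_NET_STATS() glues the halves together
 * and adds the snapshot preserved in temp_stats_blk across chip resets;
 * GET_32BIT_NET_STATS() does the same for single-word counters.
 */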
6632
6633static struct rtnl_link_stats64 *
6634bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6635{
6636	struct bnx2 *bp = netdev_priv(dev);
6637
6638	if (bp->stats_blk == NULL)
6639		return net_stats;
6640
6641	net_stats->rx_packets =
6642		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6643		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6644		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6645
6646	net_stats->tx_packets =
6647		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6648		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6649		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6650
6651	net_stats->rx_bytes =
6652		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6653
6654	net_stats->tx_bytes =
6655		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6656
6657	net_stats->multicast =
6658		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6659
6660	net_stats->collisions =
6661		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6662
6663	net_stats->rx_length_errors =
6664		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6665		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6666
6667	net_stats->rx_over_errors =
6668		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6669		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6670
6671	net_stats->rx_frame_errors =
6672		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6673
6674	net_stats->rx_crc_errors =
6675		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6676
6677	net_stats->rx_errors = net_stats->rx_length_errors +
6678		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6679		net_stats->rx_crc_errors;
6680
6681	net_stats->tx_aborted_errors =
6682		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6683		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6684
6685	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6686	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
6687		net_stats->tx_carrier_errors = 0;
6688	else {
6689		net_stats->tx_carrier_errors =
6690			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6691	}
6692
6693	net_stats->tx_errors =
6694		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6695		net_stats->tx_aborted_errors +
6696		net_stats->tx_carrier_errors;
6697
6698	net_stats->rx_missed_errors =
6699		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6700		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6701		GET_32BIT_NET_STATS(stat_FwRxDrop);
6702
6703	return net_stats;
6704}
6705
6706/* All ethtool functions called with rtnl_lock */
6707
6708static int
6709bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6710{
6711	struct bnx2 *bp = netdev_priv(dev);
6712	int support_serdes = 0, support_copper = 0;
6713
6714	cmd->supported = SUPPORTED_Autoneg;
6715	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6716		support_serdes = 1;
6717		support_copper = 1;
6718	} else if (bp->phy_port == PORT_FIBRE)
6719		support_serdes = 1;
6720	else
6721		support_copper = 1;
6722
6723	if (support_serdes) {
6724		cmd->supported |= SUPPORTED_1000baseT_Full |
6725			SUPPORTED_FIBRE;
6726		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6727			cmd->supported |= SUPPORTED_2500baseX_Full;
6728
6729	}
6730	if (support_copper) {
6731		cmd->supported |= SUPPORTED_10baseT_Half |
6732			SUPPORTED_10baseT_Full |
6733			SUPPORTED_100baseT_Half |
6734			SUPPORTED_100baseT_Full |
6735			SUPPORTED_1000baseT_Full |
6736			SUPPORTED_TP;
6737
6738	}
6739
6740	spin_lock_bh(&bp->phy_lock);
6741	cmd->port = bp->phy_port;
6742	cmd->advertising = bp->advertising;
6743
6744	if (bp->autoneg & AUTONEG_SPEED) {
6745		cmd->autoneg = AUTONEG_ENABLE;
6746	} else {
6747		cmd->autoneg = AUTONEG_DISABLE;
6748	}
6749
6750	if (netif_carrier_ok(dev)) {
6751		ethtool_cmd_speed_set(cmd, bp->line_speed);
6752		cmd->duplex = bp->duplex;
6753	}
6754	else {
6755		ethtool_cmd_speed_set(cmd, -1);
6756		cmd->duplex = -1;
6757	}
6758	spin_unlock_bh(&bp->phy_lock);
6759
6760	cmd->transceiver = XCVR_INTERNAL;
6761	cmd->phy_address = bp->phy_addr;
6762
6763	return 0;
6764}
6765
6766static int
6767bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6768{
6769	struct bnx2 *bp = netdev_priv(dev);
6770	u8 autoneg = bp->autoneg;
6771	u8 req_duplex = bp->req_duplex;
6772	u16 req_line_speed = bp->req_line_speed;
6773	u32 advertising = bp->advertising;
6774	int err = -EINVAL;
6775
6776	spin_lock_bh(&bp->phy_lock);
6777
6778	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6779		goto err_out_unlock;
6780
6781	if (cmd->port != bp->phy_port &&
6782	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6783		goto err_out_unlock;
6784
6785	/* If device is down, we can store the settings only if the user
6786	 * is setting the currently active port.
6787	 */
6788	if (!netif_running(dev) && cmd->port != bp->phy_port)
6789		goto err_out_unlock;
6790
6791	if (cmd->autoneg == AUTONEG_ENABLE) {
6792		autoneg |= AUTONEG_SPEED;
6793
6794		advertising = cmd->advertising;
6795		if (cmd->port == PORT_TP) {
6796			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6797			if (!advertising)
6798				advertising = ETHTOOL_ALL_COPPER_SPEED;
6799		} else {
6800			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6801			if (!advertising)
6802				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6803		}
6804		advertising |= ADVERTISED_Autoneg;
6805	}
6806	else {
6807		u32 speed = ethtool_cmd_speed(cmd);
6808		if (cmd->port == PORT_FIBRE) {
6809			if ((speed != SPEED_1000 &&
6810			     speed != SPEED_2500) ||
6811			    (cmd->duplex != DUPLEX_FULL))
6812				goto err_out_unlock;
6813
6814			if (speed == SPEED_2500 &&
6815			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6816				goto err_out_unlock;
6817		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6818			goto err_out_unlock;
6819
6820		autoneg &= ~AUTONEG_SPEED;
6821		req_line_speed = speed;
6822		req_duplex = cmd->duplex;
6823		advertising = 0;
6824	}
6825
6826	bp->autoneg = autoneg;
6827	bp->advertising = advertising;
6828	bp->req_line_speed = req_line_speed;
6829	bp->req_duplex = req_duplex;
6830
6831	err = 0;
6832	/* If device is down, the new settings will be picked up when it is
6833	 * brought up.
6834	 */
6835	if (netif_running(dev))
6836		err = bnx2_setup_phy(bp, cmd->port);
6837
6838err_out_unlock:
6839	spin_unlock_bh(&bp->phy_lock);
6840
6841	return err;
6842}
6843
6844static void
6845bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6846{
6847	struct bnx2 *bp = netdev_priv(dev);
6848
6849	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
6850	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
6851	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
6852	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
6853}
6854
6855#define BNX2_REGDUMP_LEN		(32 * 1024)
6856
6857static int
6858bnx2_get_regs_len(struct net_device *dev)
6859{
6860	return BNX2_REGDUMP_LEN;
6861}
6862
6863static void
6864bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6865{
6866	u32 *p = _p, i, offset;
6867	u8 *orig_p = _p;
6868	struct bnx2 *bp = netdev_priv(dev);
6869	static const u32 reg_boundaries[] = {
6870		0x0000, 0x0098, 0x0400, 0x045c,
6871		0x0800, 0x0880, 0x0c00, 0x0c10,
6872		0x0c30, 0x0d08, 0x1000, 0x101c,
6873		0x1040, 0x1048, 0x1080, 0x10a4,
6874		0x1400, 0x1490, 0x1498, 0x14f0,
6875		0x1500, 0x155c, 0x1580, 0x15dc,
6876		0x1600, 0x1658, 0x1680, 0x16d8,
6877		0x1800, 0x1820, 0x1840, 0x1854,
6878		0x1880, 0x1894, 0x1900, 0x1984,
6879		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6880		0x1c80, 0x1c94, 0x1d00, 0x1d84,
6881		0x2000, 0x2030, 0x23c0, 0x2400,
6882		0x2800, 0x2820, 0x2830, 0x2850,
6883		0x2b40, 0x2c10, 0x2fc0, 0x3058,
6884		0x3c00, 0x3c94, 0x4000, 0x4010,
6885		0x4080, 0x4090, 0x43c0, 0x4458,
6886		0x4c00, 0x4c18, 0x4c40, 0x4c54,
6887		0x4fc0, 0x5010, 0x53c0, 0x5444,
6888		0x5c00, 0x5c18, 0x5c80, 0x5c90,
6889		0x5fc0, 0x6000, 0x6400, 0x6428,
6890		0x6800, 0x6848, 0x684c, 0x6860,
6891		0x6888, 0x6910, 0x8000
6892	};
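	/* reg_boundaries[] holds [start, end) byte-offset pairs of register
	 * windows that are safe to read; the loop below dumps each window
	 * and leaves the gaps between windows zero-filled.
	 */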
6893
6894	regs->version = 0;
6895
6896	memset(p, 0, BNX2_REGDUMP_LEN);
6897
6898	if (!netif_running(bp->dev))
6899		return;
6900
6901	i = 0;
6902	offset = reg_boundaries[0];
6903	p += offset;
6904	while (offset < BNX2_REGDUMP_LEN) {
6905		*p++ = REG_RD(bp, offset);
6906		offset += 4;
6907		if (offset == reg_boundaries[i + 1]) {
6908			offset = reg_boundaries[i + 2];
6909			p = (u32 *) (orig_p + offset);
6910			i += 2;
6911		}
6912	}
6913}
6914
6915static void
6916bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6917{
6918	struct bnx2 *bp = netdev_priv(dev);
6919
6920	if (bp->flags & BNX2_FLAG_NO_WOL) {
6921		wol->supported = 0;
6922		wol->wolopts = 0;
6923	}
6924	else {
6925		wol->supported = WAKE_MAGIC;
6926		if (bp->wol)
6927			wol->wolopts = WAKE_MAGIC;
6928		else
6929			wol->wolopts = 0;
6930	}
6931	memset(&wol->sopass, 0, sizeof(wol->sopass));
6932}
6933
6934static int
6935bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6936{
6937	struct bnx2 *bp = netdev_priv(dev);
6938
6939	if (wol->wolopts & ~WAKE_MAGIC)
6940		return -EINVAL;
6941
6942	if (wol->wolopts & WAKE_MAGIC) {
6943		if (bp->flags & BNX2_FLAG_NO_WOL)
6944			return -EINVAL;
6945
6946		bp->wol = 1;
6947	}
6948	else {
6949		bp->wol = 0;
6950	}
6951	return 0;
6952}
6953
6954static int
6955bnx2_nway_reset(struct net_device *dev)
6956{
6957	struct bnx2 *bp = netdev_priv(dev);
6958	u32 bmcr;
6959
6960	if (!netif_running(dev))
6961		return -EAGAIN;
6962
6963	if (!(bp->autoneg & AUTONEG_SPEED)) {
6964		return -EINVAL;
6965	}
6966
6967	spin_lock_bh(&bp->phy_lock);
6968
6969	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6970		int rc;
6971
6972		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6973		spin_unlock_bh(&bp->phy_lock);
6974		return rc;
6975	}
6976
6977	/* Force a link-down event visible to the other side. */
6978	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6979		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6980		spin_unlock_bh(&bp->phy_lock);
6981
6982		msleep(20);
6983
6984		spin_lock_bh(&bp->phy_lock);
6985
6986		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6987		bp->serdes_an_pending = 1;
6988		mod_timer(&bp->timer, jiffies + bp->current_interval);
6989	}
6990
6991	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6992	bmcr &= ~BMCR_LOOPBACK;
6993	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6994
6995	spin_unlock_bh(&bp->phy_lock);
6996
6997	return 0;
6998}
6999
7000static u32
7001bnx2_get_link(struct net_device *dev)
7002{
7003	struct bnx2 *bp = netdev_priv(dev);
7004
7005	return bp->link_up;
7006}
7007
7008static int
7009bnx2_get_eeprom_len(struct net_device *dev)
7010{
7011	struct bnx2 *bp = netdev_priv(dev);
7012
7013	if (bp->flash_info == NULL)
7014		return 0;
7015
7016	return (int) bp->flash_size;
7017}
7018
7019static int
7020bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7021		u8 *eebuf)
7022{
7023	struct bnx2 *bp = netdev_priv(dev);
7024	int rc;
7025
7026	if (!netif_running(dev))
7027		return -EAGAIN;
7028
7029	/* parameters already validated in ethtool_get_eeprom */
7030
7031	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7032
7033	return rc;
7034}
7035
7036static int
7037bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7038		u8 *eebuf)
7039{
7040	struct bnx2 *bp = netdev_priv(dev);
7041	int rc;
7042
7043	if (!netif_running(dev))
7044		return -EAGAIN;
7045
7046	/* parameters already validated in ethtool_set_eeprom */
7047
7048	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7049
7050	return rc;
7051}
7052
7053static int
7054bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7055{
7056	struct bnx2 *bp = netdev_priv(dev);
7057
7058	memset(coal, 0, sizeof(struct ethtool_coalesce));
7059
7060	coal->rx_coalesce_usecs = bp->rx_ticks;
7061	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7062	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7063	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7064
7065	coal->tx_coalesce_usecs = bp->tx_ticks;
7066	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7067	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7068	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7069
7070	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7071
7072	return 0;
7073}
7074
7075static int
7076bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7077{
7078	struct bnx2 *bp = netdev_priv(dev);
7079
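	/* Clamp all user-supplied values to the hardware field widths
	 * implied by the masks below: tick timers are 10-bit (0x3ff max),
	 * frame-count trip points are 8-bit (0xff max).
	 */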
7080	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7081	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7082
7083	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7084	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7085
7086	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7087	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7088
7089	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7090	if (bp->rx_quick_cons_trip_int > 0xff)
7091		bp->rx_quick_cons_trip_int = 0xff;
7092
7093	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7094	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7095
7096	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7097	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7098
7099	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7100	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7101
7102	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7103	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7104		0xff;
7105
7106	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7107	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7108		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7109			bp->stats_ticks = USEC_PER_SEC;
7110	}
7111	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7112		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7113	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7114
7115	if (netif_running(bp->dev)) {
7116		bnx2_netif_stop(bp, true);
7117		bnx2_init_nic(bp, 0);
7118		bnx2_netif_start(bp, true);
7119	}
7120
7121	return 0;
7122}
7123
7124static void
7125bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7126{
7127	struct bnx2 *bp = netdev_priv(dev);
7128
7129	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7130	ering->rx_mini_max_pending = 0;
7131	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7132
7133	ering->rx_pending = bp->rx_ring_size;
7134	ering->rx_mini_pending = 0;
7135	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7136
7137	ering->tx_max_pending = MAX_TX_DESC_CNT;
7138	ering->tx_pending = bp->tx_ring_size;
7139}
7140
7141static int
7142bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7143{
7144	if (netif_running(bp->dev)) {
7145		/* Reset will erase chipset stats; save them */
7146		bnx2_save_stats(bp);
7147
7148		bnx2_netif_stop(bp, true);
7149		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7150		__bnx2_free_irq(bp);
7151		bnx2_free_skbs(bp);
7152		bnx2_free_mem(bp);
7153	}
7154
7155	bnx2_set_rx_ring_size(bp, rx);
7156	bp->tx_ring_size = tx;
7157
7158	if (netif_running(bp->dev)) {
7159		int rc;
7160
7161		rc = bnx2_alloc_mem(bp);
7162		if (!rc)
7163			rc = bnx2_request_irq(bp);
7164
7165		if (!rc)
7166			rc = bnx2_init_nic(bp, 0);
7167
7168		if (rc) {
7169			bnx2_napi_enable(bp);
7170			dev_close(bp->dev);
7171			return rc;
7172		}
7173#ifdef BCM_CNIC
7174		mutex_lock(&bp->cnic_lock);
7175		/* Let cnic know about the new status block. */
7176		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7177			bnx2_setup_cnic_irq_info(bp);
7178		mutex_unlock(&bp->cnic_lock);
7179#endif
7180		bnx2_netif_start(bp, true);
7181	}
7182	return 0;
7183}
7184
7185static int
7186bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7187{
7188	struct bnx2 *bp = netdev_priv(dev);
7189	int rc;
7190
7191	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7192		(ering->tx_pending > MAX_TX_DESC_CNT) ||
7193		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7194
7195		return -EINVAL;
7196	}
7197	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7198	return rc;
7199}
7200
7201static void
7202bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7203{
7204	struct bnx2 *bp = netdev_priv(dev);
7205
7206	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7207	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7208	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7209}
7210
7211static int
7212bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7213{
7214	struct bnx2 *bp = netdev_priv(dev);
7215
7216	bp->req_flow_ctrl = 0;
7217	if (epause->rx_pause)
7218		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7219	if (epause->tx_pause)
7220		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7221
7222	if (epause->autoneg) {
7223		bp->autoneg |= AUTONEG_FLOW_CTRL;
7224	}
7225	else {
7226		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7227	}
7228
7229	if (netif_running(dev)) {
7230		spin_lock_bh(&bp->phy_lock);
7231		bnx2_setup_phy(bp, bp->phy_port);
7232		spin_unlock_bh(&bp->phy_lock);
7233	}
7234
7235	return 0;
7236}
7237
7238static struct {
7239	char string[ETH_GSTRING_LEN];
7240} bnx2_stats_str_arr[] = {
7241	{ "rx_bytes" },
7242	{ "rx_error_bytes" },
7243	{ "tx_bytes" },
7244	{ "tx_error_bytes" },
7245	{ "rx_ucast_packets" },
7246	{ "rx_mcast_packets" },
7247	{ "rx_bcast_packets" },
7248	{ "tx_ucast_packets" },
7249	{ "tx_mcast_packets" },
7250	{ "tx_bcast_packets" },
7251	{ "tx_mac_errors" },
7252	{ "tx_carrier_errors" },
7253	{ "rx_crc_errors" },
7254	{ "rx_align_errors" },
7255	{ "tx_single_collisions" },
7256	{ "tx_multi_collisions" },
7257	{ "tx_deferred" },
7258	{ "tx_excess_collisions" },
7259	{ "tx_late_collisions" },
7260	{ "tx_total_collisions" },
7261	{ "rx_fragments" },
7262	{ "rx_jabbers" },
7263	{ "rx_undersize_packets" },
7264	{ "rx_oversize_packets" },
7265	{ "rx_64_byte_packets" },
7266	{ "rx_65_to_127_byte_packets" },
7267	{ "rx_128_to_255_byte_packets" },
7268	{ "rx_256_to_511_byte_packets" },
7269	{ "rx_512_to_1023_byte_packets" },
7270	{ "rx_1024_to_1522_byte_packets" },
7271	{ "rx_1523_to_9022_byte_packets" },
7272	{ "tx_64_byte_packets" },
7273	{ "tx_65_to_127_byte_packets" },
7274	{ "tx_128_to_255_byte_packets" },
7275	{ "tx_256_to_511_byte_packets" },
7276	{ "tx_512_to_1023_byte_packets" },
7277	{ "tx_1024_to_1522_byte_packets" },
7278	{ "tx_1523_to_9022_byte_packets" },
7279	{ "rx_xon_frames" },
7280	{ "rx_xoff_frames" },
7281	{ "tx_xon_frames" },
7282	{ "tx_xoff_frames" },
7283	{ "rx_mac_ctrl_frames" },
7284	{ "rx_filtered_packets" },
7285	{ "rx_ftq_discards" },
7286	{ "rx_discards" },
7287	{ "rx_fw_discards" },
7288};
7289
7290#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7291
7292
7293#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7294
7295static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7296    STATS_OFFSET32(stat_IfHCInOctets_hi),
7297    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7298    STATS_OFFSET32(stat_IfHCOutOctets_hi),
7299    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7300    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7301    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7302    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7303    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7304    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7305    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7306    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7307    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7308    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7309    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7310    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7311    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7312    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7313    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7314    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7315    STATS_OFFSET32(stat_EtherStatsCollisions),
7316    STATS_OFFSET32(stat_EtherStatsFragments),
7317    STATS_OFFSET32(stat_EtherStatsJabbers),
7318    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7319    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7320    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7321    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7322    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7323    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7324    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7325    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7326    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7327    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7328    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7329    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7330    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7331    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7332    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7333    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7334    STATS_OFFSET32(stat_XonPauseFramesReceived),
7335    STATS_OFFSET32(stat_XoffPauseFramesReceived),
7336    STATS_OFFSET32(stat_OutXonSent),
7337    STATS_OFFSET32(stat_OutXoffSent),
7338    STATS_OFFSET32(stat_MacControlFramesReceived),
7339    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7340    STATS_OFFSET32(stat_IfInFTQDiscards),
7341    STATS_OFFSET32(stat_IfInMBUFDiscards),
7342    STATS_OFFSET32(stat_FwRxDrop),
7343};
7344
7345/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7346 * skipped because of errata.
7347 */
7348static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7349	8,0,8,8,8,8,8,8,8,8,
7350	4,0,4,4,4,4,4,4,4,4,
7351	4,4,4,4,4,4,4,4,4,4,
7352	4,4,4,4,4,4,4,4,4,4,
7353	4,4,4,4,4,4,4,
7354};
7355
7356static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7357	8,0,8,8,8,8,8,8,8,8,
7358	4,4,4,4,4,4,4,4,4,4,
7359	4,4,4,4,4,4,4,4,4,4,
7360	4,4,4,4,4,4,4,4,4,4,
7361	4,4,4,4,4,4,4,
7362};
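/* Per-chip counter widths used by bnx2_get_ethtool_stats(): 0 means the
 * counter is skipped on that chip (reported as zero), 4 is a single
 * 32-bit word, and 8 is a hi/lo 64-bit pair.
 */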
7363
7364#define BNX2_NUM_TESTS 6
7365
7366static struct {
7367	char string[ETH_GSTRING_LEN];
7368} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7369	{ "register_test (offline)" },
7370	{ "memory_test (offline)" },
7371	{ "loopback_test (offline)" },
7372	{ "nvram_test (online)" },
7373	{ "interrupt_test (online)" },
7374	{ "link_test (online)" },
7375};
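/* The order above matters: bnx2_self_test() fills buf[0]..buf[5] in the
 * same order, so e.g. buf[2] carries the loopback result bitmask.
 */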
7376
7377static int
7378bnx2_get_sset_count(struct net_device *dev, int sset)
7379{
7380	switch (sset) {
7381	case ETH_SS_TEST:
7382		return BNX2_NUM_TESTS;
7383	case ETH_SS_STATS:
7384		return BNX2_NUM_STATS;
7385	default:
7386		return -EOPNOTSUPP;
7387	}
7388}
7389
7390static void
7391bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7392{
7393	struct bnx2 *bp = netdev_priv(dev);
7394
7395	bnx2_set_power_state(bp, PCI_D0);
7396
7397	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7398	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7399		int i;
7400
7401		bnx2_netif_stop(bp, true);
7402		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7403		bnx2_free_skbs(bp);
7404
7405		if (bnx2_test_registers(bp) != 0) {
7406			buf[0] = 1;
7407			etest->flags |= ETH_TEST_FL_FAILED;
7408		}
7409		if (bnx2_test_memory(bp) != 0) {
7410			buf[1] = 1;
7411			etest->flags |= ETH_TEST_FL_FAILED;
7412		}
7413		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7414			etest->flags |= ETH_TEST_FL_FAILED;
7415
7416		if (!netif_running(bp->dev))
7417			bnx2_shutdown_chip(bp);
7418		else {
7419			bnx2_init_nic(bp, 1);
7420			bnx2_netif_start(bp, true);
7421		}
7422
7423		/* wait for link up */
7424		for (i = 0; i < 7; i++) {
7425			if (bp->link_up)
7426				break;
7427			msleep_interruptible(1000);
7428		}
7429	}
7430
7431	if (bnx2_test_nvram(bp) != 0) {
7432		buf[3] = 1;
7433		etest->flags |= ETH_TEST_FL_FAILED;
7434	}
7435	if (bnx2_test_intr(bp) != 0) {
7436		buf[4] = 1;
7437		etest->flags |= ETH_TEST_FL_FAILED;
7438	}
7439
7440	if (bnx2_test_link(bp) != 0) {
7441		buf[5] = 1;
7442		etest->flags |= ETH_TEST_FL_FAILED;
7443
7444	}
7445	if (!netif_running(bp->dev))
7446		bnx2_set_power_state(bp, PCI_D3hot);
7447}
7448
7449static void
7450bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7451{
7452	switch (stringset) {
7453	case ETH_SS_STATS:
7454		memcpy(buf, bnx2_stats_str_arr,
7455			sizeof(bnx2_stats_str_arr));
7456		break;
7457	case ETH_SS_TEST:
7458		memcpy(buf, bnx2_tests_str_arr,
7459			sizeof(bnx2_tests_str_arr));
7460		break;
7461	}
7462}
7463
7464static void
7465bnx2_get_ethtool_stats(struct net_device *dev,
7466		struct ethtool_stats *stats, u64 *buf)
7467{
7468	struct bnx2 *bp = netdev_priv(dev);
7469	int i;
7470	u32 *hw_stats = (u32 *) bp->stats_blk;
7471	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7472	u8 *stats_len_arr = NULL;
7473
7474	if (hw_stats == NULL) {
7475		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7476		return;
7477	}
7478
7479	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7480	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7481	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7482	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
7483		stats_len_arr = bnx2_5706_stats_len_arr;
7484	else
7485		stats_len_arr = bnx2_5708_stats_len_arr;
7486
7487	for (i = 0; i < BNX2_NUM_STATS; i++) {
7488		unsigned long offset;
7489
7490		if (stats_len_arr[i] == 0) {
7491			/* skip this counter */
7492			buf[i] = 0;
7493			continue;
7494		}
7495
7496		offset = bnx2_stats_offset_arr[i];
7497		if (stats_len_arr[i] == 4) {
7498			/* 4-byte counter */
7499			buf[i] = (u64) *(hw_stats + offset) +
7500				 *(temp_stats + offset);
7501			continue;
7502		}
7503		/* 8-byte counter */
7504		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7505			 *(hw_stats + offset + 1) +
7506			 (((u64) *(temp_stats + offset)) << 32) +
7507			 *(temp_stats + offset + 1);
7508	}
7509}
7510
7511static int
7512bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7513{
7514	struct bnx2 *bp = netdev_priv(dev);
7515
7516	switch (state) {
7517	case ETHTOOL_ID_ACTIVE:
7518		bnx2_set_power_state(bp, PCI_D0);
7519
7520		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
7521		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7522		return 1;	/* cycle on/off once per second */
7523
7524	case ETHTOOL_ID_ON:
7525		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7526		       BNX2_EMAC_LED_1000MB_OVERRIDE |
7527		       BNX2_EMAC_LED_100MB_OVERRIDE |
7528		       BNX2_EMAC_LED_10MB_OVERRIDE |
7529		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7530		       BNX2_EMAC_LED_TRAFFIC);
7531		break;
7532
7533	case ETHTOOL_ID_OFF:
7534		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7535		break;
7536
7537	case ETHTOOL_ID_INACTIVE:
7538		REG_WR(bp, BNX2_EMAC_LED, 0);
7539		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7540
7541		if (!netif_running(dev))
7542			bnx2_set_power_state(bp, PCI_D3hot);
7543		break;
7544	}
7545
7546	return 0;
7547}
7548
7549static u32
7550bnx2_fix_features(struct net_device *dev, u32 features)
7551{
7552	struct bnx2 *bp = netdev_priv(dev);
7553
7554	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
7555		features |= NETIF_F_HW_VLAN_RX;
7556
7557	return features;
7558}
7559
7560static int
7561bnx2_set_features(struct net_device *dev, u32 features)
7562{
7563	struct bnx2 *bp = netdev_priv(dev);
7564
7565	/* TSO with VLAN tag won't work with current firmware */
7566	if (features & NETIF_F_HW_VLAN_TX)
7567		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7568	else
7569		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7570
7571	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
7572	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7573	    netif_running(dev)) {
7574		bnx2_netif_stop(bp, false);
7575		dev->features = features;
7576		bnx2_set_rx_mode(dev);
7577		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7578		bnx2_netif_start(bp, false);
7579		return 1;
7580	}
7581
7582	return 0;
7583}
7584
7585static const struct ethtool_ops bnx2_ethtool_ops = {
7586	.get_settings		= bnx2_get_settings,
7587	.set_settings		= bnx2_set_settings,
7588	.get_drvinfo		= bnx2_get_drvinfo,
7589	.get_regs_len		= bnx2_get_regs_len,
7590	.get_regs		= bnx2_get_regs,
7591	.get_wol		= bnx2_get_wol,
7592	.set_wol		= bnx2_set_wol,
7593	.nway_reset		= bnx2_nway_reset,
7594	.get_link		= bnx2_get_link,
7595	.get_eeprom_len		= bnx2_get_eeprom_len,
7596	.get_eeprom		= bnx2_get_eeprom,
7597	.set_eeprom		= bnx2_set_eeprom,
7598	.get_coalesce		= bnx2_get_coalesce,
7599	.set_coalesce		= bnx2_set_coalesce,
7600	.get_ringparam		= bnx2_get_ringparam,
7601	.set_ringparam		= bnx2_set_ringparam,
7602	.get_pauseparam		= bnx2_get_pauseparam,
7603	.set_pauseparam		= bnx2_set_pauseparam,
7604	.self_test		= bnx2_self_test,
7605	.get_strings		= bnx2_get_strings,
7606	.set_phys_id		= bnx2_set_phys_id,
7607	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7608	.get_sset_count		= bnx2_get_sset_count,
7609};
7610
7611/* Called with rtnl_lock */
7612static int
7613bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7614{
7615	struct mii_ioctl_data *data = if_mii(ifr);
7616	struct bnx2 *bp = netdev_priv(dev);
7617	int err;
7618
7619	switch(cmd) {
7620	case SIOCGMIIPHY:
7621		data->phy_id = bp->phy_addr;
7622
7623		/* fallthru */
7624	case SIOCGMIIREG: {
7625		u32 mii_regval;
7626
7627		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7628			return -EOPNOTSUPP;
7629
7630		if (!netif_running(dev))
7631			return -EAGAIN;
7632
7633		spin_lock_bh(&bp->phy_lock);
7634		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7635		spin_unlock_bh(&bp->phy_lock);
7636
7637		data->val_out = mii_regval;
7638
7639		return err;
7640	}
7641
7642	case SIOCSMIIREG:
7643		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7644			return -EOPNOTSUPP;
7645
7646		if (!netif_running(dev))
7647			return -EAGAIN;
7648
7649		spin_lock_bh(&bp->phy_lock);
7650		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7651		spin_unlock_bh(&bp->phy_lock);
7652
7653		return err;
7654
7655	default:
7656		/* do nothing */
7657		break;
7658	}
7659	return -EOPNOTSUPP;
7660}
7661
7662/* Called with rtnl_lock */
7663static int
7664bnx2_change_mac_addr(struct net_device *dev, void *p)
7665{
7666	struct sockaddr *addr = p;
7667	struct bnx2 *bp = netdev_priv(dev);
7668
7669	if (!is_valid_ether_addr(addr->sa_data))
7670		return -EINVAL;
7671
7672	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7673	if (netif_running(dev))
7674		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7675
7676	return 0;
7677}
7678
7679/* Called with rtnl_lock */
7680static int
7681bnx2_change_mtu(struct net_device *dev, int new_mtu)
7682{
7683	struct bnx2 *bp = netdev_priv(dev);
7684
7685	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7686		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7687		return -EINVAL;
7688
7689	dev->mtu = new_mtu;
7690	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
7691}
7692
7693#ifdef CONFIG_NET_POLL_CONTROLLER
7694static void
7695poll_bnx2(struct net_device *dev)
7696{
7697	struct bnx2 *bp = netdev_priv(dev);
7698	int i;
7699
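	/* Netpoll path: with each vector masked, invoke the handler
	 * directly so netconsole can make progress without interrupt
	 * delivery.
	 */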
7700	for (i = 0; i < bp->irq_nvecs; i++) {
7701		struct bnx2_irq *irq = &bp->irq_tbl[i];
7702
7703		disable_irq(irq->vector);
7704		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7705		enable_irq(irq->vector);
7706	}
7707}
7708#endif
7709
7710static void __devinit
7711bnx2_get_5709_media(struct bnx2 *bp)
7712{
7713	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7714	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7715	u32 strap;
7716
7717	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7718		return;
7719	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7720		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7721		return;
7722	}
7723
7724	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7725		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7726	else
7727		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7728
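	/* The strap value selects the media for this PCI function; the
	 * values matched below are the SerDes bond options (anything else
	 * is copper), per the per-function switch statements that follow.
	 */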
7729	if (PCI_FUNC(bp->pdev->devfn) == 0) {
7730		switch (strap) {
7731		case 0x4:
7732		case 0x5:
7733		case 0x6:
7734			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7735			return;
7736		}
7737	} else {
7738		switch (strap) {
7739		case 0x1:
7740		case 0x2:
7741		case 0x4:
7742			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7743			return;
7744		}
7745	}
7746}
7747
7748static void __devinit
7749bnx2_get_pci_speed(struct bnx2 *bp)
7750{
7751	u32 reg;
7752
7753	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7754	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7755		u32 clkreg;
7756
7757		bp->flags |= BNX2_FLAG_PCIX;
7758
7759		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7760
7761		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7762		switch (clkreg) {
7763		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7764			bp->bus_speed_mhz = 133;
7765			break;
7766
7767		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7768			bp->bus_speed_mhz = 100;
7769			break;
7770
7771		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7772		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7773			bp->bus_speed_mhz = 66;
7774			break;
7775
7776		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7777		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7778			bp->bus_speed_mhz = 50;
7779			break;
7780
7781		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7782		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7783		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7784			bp->bus_speed_mhz = 33;
7785			break;
7786		}
7787	}
7788	else {
7789		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7790			bp->bus_speed_mhz = 66;
7791		else
7792			bp->bus_speed_mhz = 33;
7793	}
7794
7795	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7796		bp->flags |= BNX2_FLAG_PCI_32BIT;
7797
7798}
7799
7800static void __devinit
7801bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7802{
7803	int rc, i, j;
7804	u8 *data;
7805	unsigned int block_end, rosize, len;
7806
7807#define BNX2_VPD_NVRAM_OFFSET	0x300
7808#define BNX2_VPD_LEN		128
7809#define BNX2_MAX_VER_SLEN	30
7810
7811	data = kmalloc(256, GFP_KERNEL);
7812	if (!data)
7813		return;
7814
7815	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7816			     BNX2_VPD_LEN);
7817	if (rc)
7818		goto vpd_done;
7819
7820	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7821		data[i] = data[i + BNX2_VPD_LEN + 3];
7822		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7823		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7824		data[i + 3] = data[i + BNX2_VPD_LEN];
7825	}
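	/* NVRAM returned each 32-bit VPD word byte-swapped; the loop above
	 * reverses every 4-byte group so the fixed-up image sits in the
	 * first BNX2_VPD_LEN bytes.  The "1028" manufacturer check below
	 * matches Dell's PCI vendor ID, i.e. Dell-branded boards.
	 */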
7826
7827	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7828	if (i < 0)
7829		goto vpd_done;
7830
7831	rosize = pci_vpd_lrdt_size(&data[i]);
7832	i += PCI_VPD_LRDT_TAG_SIZE;
7833	block_end = i + rosize;
7834
7835	if (block_end > BNX2_VPD_LEN)
7836		goto vpd_done;
7837
7838	j = pci_vpd_find_info_keyword(data, i, rosize,
7839				      PCI_VPD_RO_KEYWORD_MFR_ID);
7840	if (j < 0)
7841		goto vpd_done;
7842
7843	len = pci_vpd_info_field_size(&data[j]);
7844
7845	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7846	if (j + len > block_end || len != 4 ||
7847	    memcmp(&data[j], "1028", 4))
7848		goto vpd_done;
7849
7850	j = pci_vpd_find_info_keyword(data, i, rosize,
7851				      PCI_VPD_RO_KEYWORD_VENDOR0);
7852	if (j < 0)
7853		goto vpd_done;
7854
7855	len = pci_vpd_info_field_size(&data[j]);
7856
7857	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7858	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7859		goto vpd_done;
7860
7861	memcpy(bp->fw_version, &data[j], len);
7862	bp->fw_version[len] = ' ';
7863
7864vpd_done:
7865	kfree(data);
7866}

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
	int err;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

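	/* Map BAR 0; mem_len is sized so the mapped register window
	 * reaches the last TX mailbox context address
	 * (TX_TSS_CID + TX_MAX_TSS_RINGS + 1).
	 */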
	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bnx2_set_power_state(bp, PCI_D0);

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
	       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (!pci_is_pcie(pdev)) {
			dev_err(&pdev->dev, "Not PCIE, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;

	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		   !(bp->flags & BNX2_FLAG_PCIX)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

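	/* Newer bootcode publishes a per-PCI-function shared memory base
	 * through a signed header; otherwise fall back to the fixed
	 * legacy shmem address.
	 */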
	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

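	/* Append the bootcode version as "bc x.y.z".  Each field is one
	 * byte of BNX2_DEV_INFO_BC_REV, printed in decimal with leading
	 * zeros suppressed.
	 */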
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

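	/* If ASF management firmware is enabled, give it up to ~300 ms
	 * (30 x 10 ms) to come up before sampling its run state below.
	 */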
	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = be32_to_cpu(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

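	/* The permanent MAC address is stored big-endian in shared memory:
	 * two bytes in MAC_UPPER, four in MAC_LOWER.
	 */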
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

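	/* Default TX/RX interrupt coalescing parameters: quick consumer
	 * index trip counts and tick timers.
	 */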
	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

#ifdef BCM_CNIC
	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
		bp->cnic_eth_dev.max_iscsi_conn =
			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

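/* Format a human-readable bus description into str,
 * e.g. "PCI Express" or "PCI-X 64-bit 133MHz".
 */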
static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}

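/* Register one NAPI context per IRQ vector, each with a weight of 64.
 * Vector 0 polls with bnx2_poll, which also services the default status
 * block; the extra MSI-X vectors poll with bnx2_poll_msix.
 */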
static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev is zeroed by alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, bp->mac_addr, ETH_ALEN);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);
	cancel_work_sync(&bp->reset_task);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result;
	int err;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev)) {
			bnx2_set_power_state(bp, PCI_D0);
			bnx2_init_nic(bp, 1);
		}
		result = PCI_ERS_RESULT_RECOVERED;
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%x\n",
			err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);