/* bnx2.c: QLogic bnx2 network driver.
 *
 * Copyright (c) 2004-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/crash_dump.h>

#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static const struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);

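/* Report how many TX descriptors are still free.  tx_prod and tx_cons
 * are free-running 16-bit indices, so the subtraction below is reduced
 * modulo 64K to stay correct across wraparound, and a difference of
 * BNX2_TX_DESC_CNT is clamped to BNX2_MAX_TX_DESC_CNT because the last
 * index of each descriptor page is skipped.
 */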
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == BNX2_TX_DESC_CNT)
			diff = BNX2_MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

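/* Indirect register access: internal addresses that are not mapped in
 * BAR0 are reached through a two-step window, latching the address in
 * BNX2_PCICFG_REG_WINDOW_ADDRESS and then transferring the data through
 * BNX2_PCICFG_REG_WINDOW.  indirect_lock keeps the address/data pair
 * atomic against concurrent users.
 */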
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&bp->indirect_lock, flags);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}

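/* Write one 32-bit word of on-chip context memory.  The 5709 uses the
 * CTX_CTX_DATA/CTX_CTX_CTRL pair and polls (up to 5 times, 5 usec
 * apart) for the hardware to clear WRITE_REQ; older chips use the
 * simpler CTX_DATA_ADR/CTX_DATA interface.  Both paths are serialized
 * by indirect_lock.
 */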
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	unsigned long flags;

	offset += cid_addr;
	spin_lock_irqsave(&bp->indirect_lock, flags);
	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		int i;

		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
		BNX2_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_irqrestore(&bp->indirect_lock, flags);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!ops)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
		return -ENODEV;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif

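/* MII access goes through the EMAC MDIO interface.  If the chip is
 * auto-polling the PHY, auto-poll is turned off for the duration of the
 * manual access so the two cannot collide.  The completion loops below
 * poll START_BUSY for up to ~500 usec (50 iterations, 10 usec apart)
 * before giving up with -EBUSY.
 */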
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

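/* Unmask interrupts on every vector.  Each vector is acked twice: once
 * with MASK_INT still set to update the status index while masked, then
 * again without MASK_INT to actually unmask.  The trailing COAL_NOW
 * command asks the host coalescing block for an immediate pass, so an
 * event that arrived while interrupts were masked should still generate
 * an interrupt.
 */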
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			bnapi->last_status_idx);

		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bnapi->last_status_idx);
	}
	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

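/* netif_stop/netif_start bracket hardware reconfiguration.  intr_sem
 * is incremented by bnx2_disable_int_sync() in the stop path and
 * decremented in bnx2_netif_start(), which re-enables NAPI and
 * interrupts only when the count drops back to zero, so stop/start
 * pairs may nest safely.
 */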
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}

static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}

static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					  txr->tx_desc_ring,
					  txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}

static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_desc_ring[j],
						  rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
						  rxr->rx_pg_desc_ring[j],
						  rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}

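/* The allocators below may fail part way through a ring; they just
 * return -ENOMEM and leave the unwinding to bnx2_free_mem(), which
 * tolerates NULL ring pointers.
 */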
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (!txr->tx_buf_ring)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (!txr->tx_desc_ring)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
		if (!rxr->rx_buf_ring)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_desc_ring[j])
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring =
				vzalloc(array_size(SW_RXPG_RING_SIZE,
						   bp->rx_max_pg_ring));
			if (!rxr->rx_pg_ring)
				return -ENOMEM;

		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (!rxr->rx_pg_desc_ring[j])
				return -ENOMEM;

		}
	}
	return 0;
}

static void
bnx2_free_stats_blk(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->status_blk) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bp->status_blk,
				  bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
}

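/* Status and statistics blocks share a single DMA-coherent allocation:
 * the cache-line aligned status area comes first (sized for all
 * hardware MSI-X vectors when the chip is MSI-X capable) and the
 * statistics block follows immediately after it.
 */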
static int
bnx2_alloc_stats_blk(struct net_device *dev)
{
	int status_blk_size;
	void *status_blk;
	struct bnx2 *bp = netdev_priv(dev);

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);
	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (!status_blk)
		return -ENOMEM;

	bp->status_blk = status_blk;
	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}

	if (bnapi->status_blk.msi)
		bnapi->status_blk.msi = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, err;
	struct bnx2_napi *bnapi;

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = bp->status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BNX2_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (!bp->ctx_blk[i])
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

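			/* The BMSR latches link-down events; read it twice
			 * so the second read reflects the current state.
			 */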
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}

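/* Resolve the flow control settings once duplex is known.  With full
 * autonegotiation, the result follows the pause resolution rules of
 * IEEE 802.3 Table 28B-3: symmetric pause on both ends enables pause in
 * both directions, while an asymmetric match enables only the direction
 * both ends agreed on.  SerDes 1000X pause bits are first remapped onto
 * the copper PAUSE_CAP/PAUSE_ASYM encoding so a single resolution path
 * serves both media.
 */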
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

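/* Derive speed/duplex for a copper PHY from the autoneg results.  Note
 * the (remote_adv >> 2) below: the link partner's 1000BASE-T ability
 * bits in MII_STAT1000 sit two bit positions above the corresponding
 * local advertisement bits in MII_CTRL1000, so the shift aligns them
 * before ANDing for the common abilities.
 */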
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	if (bp->link_up) {
		u32 ext_status;

		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
		if (ext_status & EXT_STATUS_MDIX)
			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = BNX2_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				fallthrough;
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				fallthrough;
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	BNX2_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = BNX2_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
				bnx2_5706s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
				bnx2_5708s_linkup(bp);
			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

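/* Map the requested flow control mode onto MII advertisement bits:
 * the 1000X pause bits for SerDes PHYs, PAUSE_CAP/PAUSE_ASYM for
 * copper.  Note that RX-only pause is advertised as symmetric plus
 * asymmetric, per the 802.3 pause resolution rules.
 */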
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

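/* The __releases/__acquires annotations on the setup functions below
 * document (for sparse) that they temporarily drop phy_lock around
 * bnx2_fw_sync(), which may sleep while waiting for the firmware to
 * acknowledge the command.
 */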
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

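/* Bump the driver pulse sequence number in the shared-memory mailbox;
 * the bootcode appears to treat this as a heartbeat indicating that
 * the driver is still alive.
 */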
1958static void
1959bnx2_send_heart_beat(struct bnx2 *bp)
1960{
1961	u32 msg;
1962	u32 addr;
1963
1964	spin_lock(&bp->indirect_lock);
1965	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1966	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1967	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1968	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1969	spin_unlock(&bp->indirect_lock);
1970}
1971
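/* Handle a link event reported by the remote PHY firmware: decode the
 * BNX2_LINK_STATUS word from shared memory into link state, speed,
 * duplex and flow control, then reprogram the MAC accordingly.
 */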
1972static void
1973bnx2_remote_phy_event(struct bnx2 *bp)
1974{
1975	u32 msg;
1976	u8 link_up = bp->link_up;
1977	u8 old_port;
1978
1979	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1980
1981	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1982		bnx2_send_heart_beat(bp);
1983
1984	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1985
1986	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1987		bp->link_up = 0;
1988	else {
1989		u32 speed;
1990
1991		bp->link_up = 1;
1992		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1993		bp->duplex = DUPLEX_FULL;
1994		switch (speed) {
1995			case BNX2_LINK_STATUS_10HALF:
1996				bp->duplex = DUPLEX_HALF;
1997				fallthrough;
1998			case BNX2_LINK_STATUS_10FULL:
1999				bp->line_speed = SPEED_10;
2000				break;
2001			case BNX2_LINK_STATUS_100HALF:
2002				bp->duplex = DUPLEX_HALF;
2003				fallthrough;
2004			case BNX2_LINK_STATUS_100BASE_T4:
2005			case BNX2_LINK_STATUS_100FULL:
2006				bp->line_speed = SPEED_100;
2007				break;
2008			case BNX2_LINK_STATUS_1000HALF:
2009				bp->duplex = DUPLEX_HALF;
2010				fallthrough;
2011			case BNX2_LINK_STATUS_1000FULL:
2012				bp->line_speed = SPEED_1000;
2013				break;
2014			case BNX2_LINK_STATUS_2500HALF:
2015				bp->duplex = DUPLEX_HALF;
2016				fallthrough;
2017			case BNX2_LINK_STATUS_2500FULL:
2018				bp->line_speed = SPEED_2500;
2019				break;
2020			default:
2021				bp->line_speed = 0;
2022				break;
2023		}
2024
2025		bp->flow_ctrl = 0;
2026		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2027		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2028			if (bp->duplex == DUPLEX_FULL)
2029				bp->flow_ctrl = bp->req_flow_ctrl;
2030		} else {
2031			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2032				bp->flow_ctrl |= FLOW_CTRL_TX;
2033			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2034				bp->flow_ctrl |= FLOW_CTRL_RX;
2035		}
2036
2037		old_port = bp->phy_port;
2038		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2039			bp->phy_port = PORT_FIBRE;
2040		else
2041			bp->phy_port = PORT_TP;
2042
2043		if (old_port != bp->phy_port)
2044			bnx2_set_default_link(bp);
2045
2046	}
2047	if (bp->link_up != link_up)
2048		bnx2_report_link(bp);
2049
2050	bnx2_set_mac_link(bp);
2051}
2052
2053static int
2054bnx2_set_remote_link(struct bnx2 *bp)
2055{
2056	u32 evt_code;
2057
2058	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2059	switch (evt_code) {
2060		case BNX2_FW_EVT_CODE_LINK_EVENT:
2061			bnx2_remote_phy_event(bp);
2062			break;
2063		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2064		default:
2065			bnx2_send_heart_beat(bp);
2066			break;
2067	}
2068	return 0;
2069}
2070
2071static int
2072bnx2_setup_copper_phy(struct bnx2 *bp)
2073__releases(&bp->phy_lock)
2074__acquires(&bp->phy_lock)
2075{
2076	u32 bmcr, adv_reg, new_adv = 0;
2077	u32 new_bmcr;
2078
2079	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2080
2081	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2082	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2083		    ADVERTISE_PAUSE_ASYM);
2084
2085	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2086
2087	if (bp->autoneg & AUTONEG_SPEED) {
2088		u32 adv1000_reg;
2089		u32 new_adv1000 = 0;
2090
2091		new_adv |= bnx2_phy_get_pause_adv(bp);
2092
2093		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2094		adv1000_reg &= PHY_ALL_1000_SPEED;
2095
2096		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2097		if ((adv1000_reg != new_adv1000) ||
2098			(adv_reg != new_adv) ||
2099			((bmcr & BMCR_ANENABLE) == 0)) {
2100
2101			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2102			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2103			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2104				BMCR_ANENABLE);
2105		}
2106		else if (bp->link_up) {
2107			/* Flow ctrl may have changed from auto to forced
2108			 * or vice-versa. */
2109
2110			bnx2_resolve_flow_ctrl(bp);
2111			bnx2_set_mac_link(bp);
2112		}
2113		return 0;
2114	}
2115
2116	/* advertise nothing when forcing speed */
2117	if (adv_reg != new_adv)
2118		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2119
2120	new_bmcr = 0;
2121	if (bp->req_line_speed == SPEED_100) {
2122		new_bmcr |= BMCR_SPEED100;
2123	}
2124	if (bp->req_duplex == DUPLEX_FULL) {
2125		new_bmcr |= BMCR_FULLDPLX;
2126	}
2127	if (new_bmcr != bmcr) {
2128		u32 bmsr;
2129
2130		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2131		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132
2133		if (bmsr & BMSR_LSTATUS) {
2134			/* Force link down */
2135			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2136			spin_unlock_bh(&bp->phy_lock);
2137			msleep(50);
2138			spin_lock_bh(&bp->phy_lock);
2139
2140			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2141			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142		}
2143
2144		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2145
2146		/* Normally, the new speed is set up after the link has
2147		 * gone down and up again. In some cases, link will not go
2148		 * down so we need to set up the new speed here.
2149		 */
2150		if (bmsr & BMSR_LSTATUS) {
2151			bp->line_speed = bp->req_line_speed;
2152			bp->duplex = bp->req_duplex;
2153			bnx2_resolve_flow_ctrl(bp);
2154			bnx2_set_mac_link(bp);
2155		}
2156	} else {
2157		bnx2_resolve_flow_ctrl(bp);
2158		bnx2_set_mac_link(bp);
2159	}
2160	return 0;
2161}
2162
2163static int
2164bnx2_setup_phy(struct bnx2 *bp, u8 port)
2165__releases(&bp->phy_lock)
2166__acquires(&bp->phy_lock)
2167{
2168	if (bp->loopback == MAC_LOOPBACK)
2169		return 0;
2170
2171	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2172		return bnx2_setup_serdes_phy(bp, port);
2173	}
2174	else {
2175		return bnx2_setup_copper_phy(bp);
2176	}
2177}
2178
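/* The 5709 SerDes PHY exposes the standard MII registers at an offset
 * of 0x10 and groups its vendor registers into blocks selected through
 * MII_BNX2_BLK_ADDR; set up the register map and force fiber mode.
 */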
2179static int
2180bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2181{
2182	u32 val;
2183
2184	bp->mii_bmcr = MII_BMCR + 0x10;
2185	bp->mii_bmsr = MII_BMSR + 0x10;
2186	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2187	bp->mii_adv = MII_ADVERTISE + 0x10;
2188	bp->mii_lpa = MII_LPA + 0x10;
2189	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2190
2191	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2192	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2193
2194	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2195	if (reset_phy)
2196		bnx2_reset_phy(bp);
2197
2198	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2199
2200	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2201	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2202	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2203	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2204
2205	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2206	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2207	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2208		val |= BCM5708S_UP1_2G5;
2209	else
2210		val &= ~BCM5708S_UP1_2G5;
2211	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2212
2213	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2214	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2215	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2216	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2217
2218	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2219
2220	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2221	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2222	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2223
2224	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2225
2226	return 0;
2227}
2228
2229static int
2230bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2231{
2232	u32 val;
2233
2234	if (reset_phy)
2235		bnx2_reset_phy(bp);
2236
2237	bp->mii_up1 = BCM5708S_UP1;
2238
2239	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2240	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2241	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2242
2243	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2244	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2245	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2246
2247	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2248	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2249	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2250
2251	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2252		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2253		val |= BCM5708S_UP1_2G5;
2254		bnx2_write_phy(bp, BCM5708S_UP1, val);
2255	}
2256
2257	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2258	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2259	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2260		/* increase tx signal amplitude */
2261		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2262			       BCM5708S_BLK_ADDR_TX_MISC);
2263		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2264		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2265		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2266		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2267	}
2268
2269	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2270	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2271
2272	if (val) {
2273		u32 is_backplane;
2274
2275		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2276		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2277			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2278				       BCM5708S_BLK_ADDR_TX_MISC);
2279			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2280			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2281				       BCM5708S_BLK_ADDR_DIG);
2282		}
2283	}
2284	return 0;
2285}
2286
2287static int
2288bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2289{
2290	if (reset_phy)
2291		bnx2_reset_phy(bp);
2292
2293	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2294
2295	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2296		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2297
2298	if (bp->dev->mtu > ETH_DATA_LEN) {
2299		u32 val;
2300
2301		/* Set extended packet length bit */
2302		bnx2_write_phy(bp, 0x18, 0x7);
2303		bnx2_read_phy(bp, 0x18, &val);
2304		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2305
2306		bnx2_write_phy(bp, 0x1c, 0x6c00);
2307		bnx2_read_phy(bp, 0x1c, &val);
2308		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2309	}
2310	else {
2311		u32 val;
2312
2313		bnx2_write_phy(bp, 0x18, 0x7);
2314		bnx2_read_phy(bp, 0x18, &val);
2315		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2316
2317		bnx2_write_phy(bp, 0x1c, 0x6c00);
2318		bnx2_read_phy(bp, 0x1c, &val);
2319		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2320	}
2321
2322	return 0;
2323}
2324
2325static int
2326bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2327{
2328	u32 val;
2329
2330	if (reset_phy)
2331		bnx2_reset_phy(bp);
2332
2333	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2334		bnx2_write_phy(bp, 0x18, 0x0c00);
2335		bnx2_write_phy(bp, 0x17, 0x000a);
2336		bnx2_write_phy(bp, 0x15, 0x310b);
2337		bnx2_write_phy(bp, 0x17, 0x201f);
2338		bnx2_write_phy(bp, 0x15, 0x9506);
2339		bnx2_write_phy(bp, 0x17, 0x401f);
2340		bnx2_write_phy(bp, 0x15, 0x14e2);
2341		bnx2_write_phy(bp, 0x18, 0x0400);
2342	}
2343
2344	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2345		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2346			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2347		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2348		val &= ~(1 << 8);
2349		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2350	}
2351
2352	if (bp->dev->mtu > ETH_DATA_LEN) {
2353		/* Set extended packet length bit */
2354		bnx2_write_phy(bp, 0x18, 0x7);
2355		bnx2_read_phy(bp, 0x18, &val);
2356		bnx2_write_phy(bp, 0x18, val | 0x4000);
2357
2358		bnx2_read_phy(bp, 0x10, &val);
2359		bnx2_write_phy(bp, 0x10, val | 0x1);
2360	}
2361	else {
2362		bnx2_write_phy(bp, 0x18, 0x7);
2363		bnx2_read_phy(bp, 0x18, &val);
2364		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2365
2366		bnx2_read_phy(bp, 0x10, &val);
2367		bnx2_write_phy(bp, 0x10, val & ~0x1);
2368	}
2369
2370	/* ethernet@wirespeed */
2371	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2372	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2373	val |=  AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2374
2375	/* auto-mdix */
2376	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2377		val |=  AUX_CTL_MISC_CTL_AUTOMDIX;
2378
2379	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2380	return 0;
2381}
2382
2383
2384static int
2385bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2386__releases(&bp->phy_lock)
2387__acquires(&bp->phy_lock)
2388{
2389	u32 val;
2390	int rc = 0;
2391
2392	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2393	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2394
2395	bp->mii_bmcr = MII_BMCR;
2396	bp->mii_bmsr = MII_BMSR;
2397	bp->mii_bmsr1 = MII_BMSR;
2398	bp->mii_adv = MII_ADVERTISE;
2399	bp->mii_lpa = MII_LPA;
2400
2401	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2402
2403	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2404		goto setup_phy;
2405
2406	bnx2_read_phy(bp, MII_PHYSID1, &val);
2407	bp->phy_id = val << 16;
2408	bnx2_read_phy(bp, MII_PHYSID2, &val);
2409	bp->phy_id |= val & 0xffff;
2410
2411	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2412		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2413			rc = bnx2_init_5706s_phy(bp, reset_phy);
2414		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2415			rc = bnx2_init_5708s_phy(bp, reset_phy);
2416		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2417			rc = bnx2_init_5709s_phy(bp, reset_phy);
2418	}
2419	else {
2420		rc = bnx2_init_copper_phy(bp, reset_phy);
2421	}
2422
2423setup_phy:
2424	if (!rc)
2425		rc = bnx2_setup_phy(bp, bp->phy_port);
2426
2427	return rc;
2428}
2429
2430static int
2431bnx2_set_mac_loopback(struct bnx2 *bp)
2432{
2433	u32 mac_mode;
2434
2435	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2436	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2437	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2438	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439	bp->link_up = 1;
2440	return 0;
2441}
2442
2443static int bnx2_test_link(struct bnx2 *);
2444
2445static int
2446bnx2_set_phy_loopback(struct bnx2 *bp)
2447{
2448	u32 mac_mode;
2449	int rc, i;
2450
2451	spin_lock_bh(&bp->phy_lock);
2452	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2453			    BMCR_SPEED1000);
2454	spin_unlock_bh(&bp->phy_lock);
2455	if (rc)
2456		return rc;
2457
2458	for (i = 0; i < 10; i++) {
2459		if (bnx2_test_link(bp) == 0)
2460			break;
2461		msleep(100);
2462	}
2463
2464	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2465	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2466		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2467		      BNX2_EMAC_MODE_25G_MODE);
2468
2469	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2470	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2471	bp->link_up = 1;
2472	return 0;
2473}
2474
2475static void
2476bnx2_dump_mcp_state(struct bnx2 *bp)
2477{
2478	struct net_device *dev = bp->dev;
2479	u32 mcp_p0, mcp_p1;
2480
2481	netdev_err(dev, "<--- start MCP states dump --->\n");
2482	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2483		mcp_p0 = BNX2_MCP_STATE_P0;
2484		mcp_p1 = BNX2_MCP_STATE_P1;
2485	} else {
2486		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2487		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2488	}
2489	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2490		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2491	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2492		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2493		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2494		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
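	/* the program counter is printed twice; a changing value
	 * indicates that the MCP CPU is still running
	 */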
2495	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2496		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2497		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2499	netdev_err(dev, "DEBUG: shmem states:\n");
2500	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2501		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2502		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2503		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2504	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2505	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2506		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2507		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2508	pr_cont(" condition[%08x]\n",
2509		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2510	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2511	DP_SHMEM_LINE(bp, 0x3cc);
2512	DP_SHMEM_LINE(bp, 0x3dc);
2513	DP_SHMEM_LINE(bp, 0x3ec);
2514	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2515	netdev_err(dev, "<--- end MCP states dump --->\n");
2516}
2517
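/* Post a message to the bootcode through the DRV_MB mailbox, tagged
 * with an incrementing sequence number, and optionally poll FW_MB for
 * up to BNX2_FW_ACK_TIME_OUT_MS for the matching acknowledgement.
 */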
2518static int
2519bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2520{
2521	int i;
2522	u32 val;
2523
2524	bp->fw_wr_seq++;
2525	msg_data |= bp->fw_wr_seq;
2526	bp->fw_last_msg = msg_data;
2527
2528	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2529
2530	if (!ack)
2531		return 0;
2532
2533	/* wait for an acknowledgement. */
2534	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2535		msleep(10);
2536
2537		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2538
2539		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2540			break;
2541	}
2542	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2543		return 0;
2544
2545	/* If we timed out, inform the firmware that this is the case. */
2546	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2547		msg_data &= ~BNX2_DRV_MSG_CODE;
2548		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2549
2550		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2551		if (!silent) {
2552			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2553			bnx2_dump_mcp_state(bp);
2554		}
2555
2556		return -EBUSY;
2557	}
2558
2559	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2560		return -EIO;
2561
2562	return 0;
2563}
2564
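/* The 5709 keeps its context memory in host DRAM: start the context
 * memory initialization, then program each context page into the
 * chip's host page table, polling for every write request to complete.
 */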
2565static int
2566bnx2_init_5709_context(struct bnx2 *bp)
2567{
2568	int i, ret = 0;
2569	u32 val;
2570
2571	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2572	val |= (BNX2_PAGE_BITS - 8) << 16;
2573	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2574	for (i = 0; i < 10; i++) {
2575		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2576		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2577			break;
2578		udelay(2);
2579	}
2580	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2581		return -EBUSY;
2582
2583	for (i = 0; i < bp->ctx_pages; i++) {
2584		int j;
2585
2586		if (bp->ctx_blk[i])
2587			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2588		else
2589			return -ENOMEM;
2590
2591		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2592			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2593			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2594		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2595			(u64) bp->ctx_blk_mapping[i] >> 32);
2596		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2597			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2598		for (j = 0; j < 10; j++) {
2599
2600			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2601			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2602				break;
2603			udelay(5);
2604		}
2605		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2606			ret = -EBUSY;
2607			break;
2608		}
2609	}
2610	return ret;
2611}
2612
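/* On 5706/5708 the context memory is on-chip: walk all 96 contexts
 * through the CTX_VIRT_ADDR/CTX_PAGE_TBL window and zero them.  5706 A0
 * needs some VCIDs remapped to different physical CIDs.
 */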
2613static void
2614bnx2_init_context(struct bnx2 *bp)
2615{
2616	u32 vcid;
2617
2618	vcid = 96;
2619	while (vcid) {
2620		u32 vcid_addr, pcid_addr, offset;
2621		int i;
2622
2623		vcid--;
2624
2625		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2626			u32 new_vcid;
2627
2628			vcid_addr = GET_PCID_ADDR(vcid);
2629			if (vcid & 0x8) {
2630				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2631			}
2632			else {
2633				new_vcid = vcid;
2634			}
2635			pcid_addr = GET_PCID_ADDR(new_vcid);
2636		}
2637		else {
2638			vcid_addr = GET_CID_ADDR(vcid);
2639			pcid_addr = vcid_addr;
2640		}
2641
2642		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2643			vcid_addr += (i << PHY_CTX_SHIFT);
2644			pcid_addr += (i << PHY_CTX_SHIFT);
2645
2646			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2647			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2648
2649			/* Zero out the context. */
2650			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2651				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2652		}
2653	}
2654}
2655
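/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the firmware, remember the good ones (bit 9 clear) and free
 * only those back, leaving the bad blocks permanently allocated.
 */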
2656static int
2657bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2658{
2659	u16 *good_mbuf;
2660	u32 good_mbuf_cnt;
2661	u32 val;
2662
2663	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2664	if (!good_mbuf)
2665		return -ENOMEM;
2666
2667	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2668		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2669
2670	good_mbuf_cnt = 0;
2671
2672	/* Allocate a bunch of mbufs and save the good ones in an array. */
2673	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2674	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2675		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2676				BNX2_RBUF_COMMAND_ALLOC_REQ);
2677
2678		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2679
2680		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2681
2682		/* The addresses with Bit 9 set are bad memory blocks. */
2683		if (!(val & (1 << 9))) {
2684			good_mbuf[good_mbuf_cnt] = (u16) val;
2685			good_mbuf_cnt++;
2686		}
2687
2688		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2689	}
2690
2691	/* Free the good ones back to the mbuf pool, thus discarding
2692	 * all the bad ones. */
2693	while (good_mbuf_cnt) {
2694		good_mbuf_cnt--;
2695
2696		val = good_mbuf[good_mbuf_cnt];
2697		val = (val << 9) | val | 1;
2698
2699		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2700	}
2701	kfree(good_mbuf);
2702	return 0;
2703}
2704
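/* Program a unicast MAC address into the EMAC perfect-match filter;
 * each filter slot is a MATCH0/MATCH1 register pair 8 bytes apart.
 */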
2705static void
2706bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2707{
2708	u32 val;
2709
2710	val = (mac_addr[0] << 8) | mac_addr[1];
2711
2712	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2713
2714	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2715		(mac_addr[4] << 8) | mac_addr[5];
2716
2717	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2718}
2719
2720static inline int
2721bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2722{
2723	dma_addr_t mapping;
2724	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2725	struct bnx2_rx_bd *rxbd =
2726		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2727	struct page *page = alloc_page(gfp);
2728
2729	if (!page)
2730		return -ENOMEM;
2731	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2732			       DMA_FROM_DEVICE);
2733	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2734		__free_page(page);
2735		return -EIO;
2736	}
2737
2738	rx_pg->page = page;
2739	dma_unmap_addr_set(rx_pg, mapping, mapping);
2740	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2741	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2742	return 0;
2743}
2744
2745static void
2746bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2747{
2748	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2749	struct page *page = rx_pg->page;
2750
2751	if (!page)
2752		return;
2753
2754	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2755		       PAGE_SIZE, DMA_FROM_DEVICE);
2756
2757	__free_page(page);
2758	rx_pg->page = NULL;
2759}
2760
2761static inline int
2762bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2763{
2764	u8 *data;
2765	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2766	dma_addr_t mapping;
2767	struct bnx2_rx_bd *rxbd =
2768		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2769
2770	data = kmalloc(bp->rx_buf_size, gfp);
2771	if (!data)
2772		return -ENOMEM;
2773
2774	mapping = dma_map_single(&bp->pdev->dev,
2775				 get_l2_fhdr(data),
2776				 bp->rx_buf_use_size,
2777				 DMA_FROM_DEVICE);
2778	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2779		kfree(data);
2780		return -EIO;
2781	}
2782
2783	rx_buf->data = data;
2784	dma_unmap_addr_set(rx_buf, mapping, mapping);
2785
2786	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2787	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2788
2789	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2790
2791	return 0;
2792}
2793
2794static int
2795bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2796{
2797	struct status_block *sblk = bnapi->status_blk.msi;
2798	u32 new_link_state, old_link_state;
2799	int is_set = 1;
2800
2801	new_link_state = sblk->status_attn_bits & event;
2802	old_link_state = sblk->status_attn_bits_ack & event;
2803	if (new_link_state != old_link_state) {
2804		if (new_link_state)
2805			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2806		else
2807			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2808	} else
2809		is_set = 0;
2810
2811	return is_set;
2812}
2813
2814static void
2815bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2816{
2817	spin_lock(&bp->phy_lock);
2818
2819	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2820		bnx2_set_link(bp);
2821	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2822		bnx2_set_remote_link(bp);
2823
2824	spin_unlock(&bp->phy_lock);
2825
2826}
2827
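/* Read the TX consumer index from the status block.  The last entry of
 * each ring page holds a chain pointer rather than a packet descriptor,
 * so an index landing there is advanced past it.
 */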
2828static inline u16
2829bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2830{
2831	u16 cons;
2832
2833	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2834
2835	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2836		cons++;
2837	return cons;
2838}
2839
2840static int
2841bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2842{
2843	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2844	u16 hw_cons, sw_cons, sw_ring_cons;
2845	int tx_pkt = 0, index;
2846	unsigned int tx_bytes = 0;
2847	struct netdev_queue *txq;
2848
2849	index = (bnapi - bp->bnx2_napi);
2850	txq = netdev_get_tx_queue(bp->dev, index);
2851
2852	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2853	sw_cons = txr->tx_cons;
2854
2855	while (sw_cons != hw_cons) {
2856		struct bnx2_sw_tx_bd *tx_buf;
2857		struct sk_buff *skb;
2858		int i, last;
2859
2860		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2861
2862		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2863		skb = tx_buf->skb;
2864
2865		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2866		prefetch(&skb->end);
2867
2868		/* partial BD completions possible with TSO packets */
2869		if (tx_buf->is_gso) {
2870			u16 last_idx, last_ring_idx;
2871
2872			last_idx = sw_cons + tx_buf->nr_frags + 1;
2873			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2874			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2875				last_idx++;
2876			}
2877			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2878				break;
2879			}
2880		}
2881
2882		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2883			skb_headlen(skb), DMA_TO_DEVICE);
2884
2885		tx_buf->skb = NULL;
2886		last = tx_buf->nr_frags;
2887
2888		for (i = 0; i < last; i++) {
2889			struct bnx2_sw_tx_bd *tx_buf;
2890
2891			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2892
2893			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2894			dma_unmap_page(&bp->pdev->dev,
2895				dma_unmap_addr(tx_buf, mapping),
2896				skb_frag_size(&skb_shinfo(skb)->frags[i]),
2897				DMA_TO_DEVICE);
2898		}
2899
2900		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2901
2902		tx_bytes += skb->len;
2903		dev_kfree_skb_any(skb);
2904		tx_pkt++;
2905		if (tx_pkt == budget)
2906			break;
2907
2908		if (hw_cons == sw_cons)
2909			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2910	}
2911
2912	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2913	txr->hw_tx_cons = hw_cons;
2914	txr->tx_cons = sw_cons;
2915
2916	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2917	 * before checking for netif_tx_queue_stopped().  Without the
2918	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2919	 * will miss it and cause the queue to be stopped forever.
2920	 */
2921	smp_mb();
2922
2923	if (unlikely(netif_tx_queue_stopped(txq)) &&
2924		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2925		__netif_tx_lock(txq, smp_processor_id());
2926		if ((netif_tx_queue_stopped(txq)) &&
2927		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2928			netif_tx_wake_queue(txq);
2929		__netif_tx_unlock(txq);
2930	}
2931
2932	return tx_pkt;
2933}
2934
2935static void
2936bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2937			struct sk_buff *skb, int count)
2938{
2939	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2940	struct bnx2_rx_bd *cons_bd, *prod_bd;
2941	int i;
2942	u16 hw_prod, prod;
2943	u16 cons = rxr->rx_pg_cons;
2944
2945	cons_rx_pg = &rxr->rx_pg_ring[cons];
2946
2947	/* The caller was unable to allocate a new page to replace the
2948	 * last one in the frags array, so we need to recycle that page
2949	 * and then free the skb.
2950	 */
2951	if (skb) {
2952		struct page *page;
2953		struct skb_shared_info *shinfo;
2954
2955		shinfo = skb_shinfo(skb);
2956		shinfo->nr_frags--;
2957		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2958
2959		cons_rx_pg->page = page;
2960		dev_kfree_skb(skb);
2961	}
2962
2963	hw_prod = rxr->rx_pg_prod;
2964
2965	for (i = 0; i < count; i++) {
2966		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2967
2968		prod_rx_pg = &rxr->rx_pg_ring[prod];
2969		cons_rx_pg = &rxr->rx_pg_ring[cons];
2970		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2971						[BNX2_RX_IDX(cons)];
2972		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2973						[BNX2_RX_IDX(prod)];
2974
2975		if (prod != cons) {
2976			prod_rx_pg->page = cons_rx_pg->page;
2977			cons_rx_pg->page = NULL;
2978			dma_unmap_addr_set(prod_rx_pg, mapping,
2979				dma_unmap_addr(cons_rx_pg, mapping));
2980
2981			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2982			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2983
2984		}
2985		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2986		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2987	}
2988	rxr->rx_pg_prod = hw_prod;
2989	rxr->rx_pg_cons = cons;
2990}
2991
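/* Recycle an RX buffer from the consumer slot back to the producer
 * slot (used when the packet was copied or a replacement buffer could
 * not be allocated), moving the DMA mapping and descriptor address.
 */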
2992static inline void
2993bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2994		   u8 *data, u16 cons, u16 prod)
2995{
2996	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2997	struct bnx2_rx_bd *cons_bd, *prod_bd;
2998
2999	cons_rx_buf = &rxr->rx_buf_ring[cons];
3000	prod_rx_buf = &rxr->rx_buf_ring[prod];
3001
3002	dma_sync_single_for_device(&bp->pdev->dev,
3003		dma_unmap_addr(cons_rx_buf, mapping),
3004		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3005
3006	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3007
3008	prod_rx_buf->data = data;
3009
3010	if (cons == prod)
3011		return;
3012
3013	dma_unmap_addr_set(prod_rx_buf, mapping,
3014			dma_unmap_addr(cons_rx_buf, mapping));
3015
3016	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3017	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3018	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3019	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3020}
3021
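/* Build an skb for a received packet: replace the just-consumed data
 * buffer, attach the header buffer with slab_build_skb(), and for
 * split/jumbo packets pull the remainder out of the page ring as
 * fragments.
 */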
3022static struct sk_buff *
3023bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3024	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3025	    u32 ring_idx)
3026{
3027	int err;
3028	u16 prod = ring_idx & 0xffff;
3029	struct sk_buff *skb;
3030
3031	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3032	if (unlikely(err)) {
3033		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3034error:
3035		if (hdr_len) {
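			/* re-add the 4-byte frame CRC that the caller
			 * stripped from len
			 */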
3036			unsigned int raw_len = len + 4;
3037			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3038
3039			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3040		}
3041		return NULL;
3042	}
3043
3044	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3045			 DMA_FROM_DEVICE);
3046	skb = slab_build_skb(data);
3047	if (!skb) {
3048		kfree(data);
3049		goto error;
3050	}
3051	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3052	if (hdr_len == 0) {
3053		skb_put(skb, len);
3054		return skb;
3055	} else {
3056		unsigned int i, frag_len, frag_size, pages;
3057		struct bnx2_sw_pg *rx_pg;
3058		u16 pg_cons = rxr->rx_pg_cons;
3059		u16 pg_prod = rxr->rx_pg_prod;
3060
3061		frag_size = len + 4 - hdr_len;
3062		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3063		skb_put(skb, hdr_len);
3064
3065		for (i = 0; i < pages; i++) {
3066			dma_addr_t mapping_old;
3067
3068			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3069			if (unlikely(frag_len <= 4)) {
3070				unsigned int tail = 4 - frag_len;
3071
3072				rxr->rx_pg_cons = pg_cons;
3073				rxr->rx_pg_prod = pg_prod;
3074				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3075							pages - i);
3076				skb->len -= tail;
3077				if (i == 0) {
3078					skb->tail -= tail;
3079				} else {
3080					skb_frag_t *frag =
3081						&skb_shinfo(skb)->frags[i - 1];
3082					skb_frag_size_sub(frag, tail);
3083					skb->data_len -= tail;
3084				}
3085				return skb;
3086			}
3087			rx_pg = &rxr->rx_pg_ring[pg_cons];
3088
3089			/* Don't unmap yet.  If we're unable to allocate a new
3090			 * page, we need to recycle the page and the DMA addr.
3091			 */
3092			mapping_old = dma_unmap_addr(rx_pg, mapping);
3093			if (i == pages - 1)
3094				frag_len -= 4;
3095
3096			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3097			rx_pg->page = NULL;
3098
3099			err = bnx2_alloc_rx_page(bp, rxr,
3100						 BNX2_RX_PG_RING_IDX(pg_prod),
3101						 GFP_ATOMIC);
3102			if (unlikely(err)) {
3103				rxr->rx_pg_cons = pg_cons;
3104				rxr->rx_pg_prod = pg_prod;
3105				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3106							pages - i);
3107				return NULL;
3108			}
3109
3110			dma_unmap_page(&bp->pdev->dev, mapping_old,
3111				       PAGE_SIZE, DMA_FROM_DEVICE);
3112
3113			frag_size -= frag_len;
3114			skb->data_len += frag_len;
3115			skb->truesize += PAGE_SIZE;
3116			skb->len += frag_len;
3117
3118			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3119			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3120		}
3121		rxr->rx_pg_prod = pg_prod;
3122		rxr->rx_pg_cons = pg_cons;
3123	}
3124	return skb;
3125}
3126
3127static inline u16
3128bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3129{
3130	u16 cons;
3131
3132	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3133
3134	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3135		cons++;
3136	return cons;
3137}
3138
3139static int
3140bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3141{
3142	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3143	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3144	struct l2_fhdr *rx_hdr;
3145	int rx_pkt = 0, pg_ring_used = 0;
3146
3147	if (budget <= 0)
3148		return rx_pkt;
3149
3150	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3151	sw_cons = rxr->rx_cons;
3152	sw_prod = rxr->rx_prod;
3153
3154	/* Memory barrier necessary as speculative reads of the rx
3155	 * buffer can be ahead of the index in the status block.
3156	 */
3157	rmb();
3158	while (sw_cons != hw_cons) {
3159		unsigned int len, hdr_len;
3160		u32 status;
3161		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3162		struct sk_buff *skb;
3163		dma_addr_t dma_addr;
3164		u8 *data;
3165		u16 next_ring_idx;
3166
3167		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3168		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3169
3170		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3171		data = rx_buf->data;
3172		rx_buf->data = NULL;
3173
3174		rx_hdr = get_l2_fhdr(data);
3175		prefetch(rx_hdr);
3176
3177		dma_addr = dma_unmap_addr(rx_buf, mapping);
3178
3179		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3180			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3181			DMA_FROM_DEVICE);
3182
3183		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3184		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3185		prefetch(get_l2_fhdr(next_rx_buf->data));
3186
3187		len = rx_hdr->l2_fhdr_pkt_len;
3188		status = rx_hdr->l2_fhdr_status;
3189
3190		hdr_len = 0;
3191		if (status & L2_FHDR_STATUS_SPLIT) {
3192			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3193			pg_ring_used = 1;
3194		} else if (len > bp->rx_jumbo_thresh) {
3195			hdr_len = bp->rx_jumbo_thresh;
3196			pg_ring_used = 1;
3197		}
3198
3199		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3200				       L2_FHDR_ERRORS_PHY_DECODE |
3201				       L2_FHDR_ERRORS_ALIGNMENT |
3202				       L2_FHDR_ERRORS_TOO_SHORT |
3203				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3204
3205			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3206					  sw_ring_prod);
3207			if (pg_ring_used) {
3208				int pages;
3209
3210				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3211
3212				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3213			}
3214			goto next_rx;
3215		}
3216
3217		len -= 4;
3218
3219		if (len <= bp->rx_copy_thresh) {
3220			skb = netdev_alloc_skb(bp->dev, len + 6);
3221			if (!skb) {
3222				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3223						  sw_ring_prod);
3224				goto next_rx;
3225			}
3226
3227			/* aligned copy */
3228			memcpy(skb->data,
3229			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3230			       len + 6);
3231			skb_reserve(skb, 6);
3232			skb_put(skb, len);
3233
3234			bnx2_reuse_rx_data(bp, rxr, data,
3235				sw_ring_cons, sw_ring_prod);
3236
3237		} else {
3238			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3239					  (sw_ring_cons << 16) | sw_ring_prod);
3240			if (!skb)
3241				goto next_rx;
3242		}
3243		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3244		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3245			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3246
3247		skb->protocol = eth_type_trans(skb, bp->dev);
3248
3249		if (len > (bp->dev->mtu + ETH_HLEN) &&
3250		    skb->protocol != htons(ETH_P_8021Q) &&
3251		    skb->protocol != htons(ETH_P_8021AD)) {
3252
3253			dev_kfree_skb(skb);
3254			goto next_rx;
3255
3256		}
3257
3258		skb_checksum_none_assert(skb);
3259		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3260			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3261			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3262
3263			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3264					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3265				skb->ip_summed = CHECKSUM_UNNECESSARY;
3266		}
3267		if ((bp->dev->features & NETIF_F_RXHASH) &&
3268		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3269		     L2_FHDR_STATUS_USE_RXHASH))
3270			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3271				     PKT_HASH_TYPE_L3);
3272
3273		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3274		napi_gro_receive(&bnapi->napi, skb);
3275		rx_pkt++;
3276
3277next_rx:
3278		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3279		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3280
3281		if (rx_pkt == budget)
3282			break;
3283
3284		/* Refresh hw_cons to see if there is new work */
3285		if (sw_cons == hw_cons) {
3286			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3287			rmb();
3288		}
3289	}
3290	rxr->rx_cons = sw_cons;
3291	rxr->rx_prod = sw_prod;
3292
3293	if (pg_ring_used)
3294		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3295
3296	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3297
3298	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3299
3300	return rx_pkt;
3301
3302}
3303
3304/* MSI ISR - The only difference between this and the INTx ISR
3305 * is that the MSI interrupt is always serviced.
3306 */
3307static irqreturn_t
3308bnx2_msi(int irq, void *dev_instance)
3309{
3310	struct bnx2_napi *bnapi = dev_instance;
3311	struct bnx2 *bp = bnapi->bp;
3312
3313	prefetch(bnapi->status_blk.msi);
3314	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3315		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3316		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3317
3318	/* Return here if interrupt is disabled. */
3319	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3320		return IRQ_HANDLED;
3321
3322	napi_schedule(&bnapi->napi);
3323
3324	return IRQ_HANDLED;
3325}
3326
3327static irqreturn_t
3328bnx2_msi_1shot(int irq, void *dev_instance)
3329{
3330	struct bnx2_napi *bnapi = dev_instance;
3331	struct bnx2 *bp = bnapi->bp;
3332
3333	prefetch(bnapi->status_blk.msi);
3334
3335	/* Return here if interrupt is disabled. */
3336	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3337		return IRQ_HANDLED;
3338
3339	napi_schedule(&bnapi->napi);
3340
3341	return IRQ_HANDLED;
3342}
3343
3344static irqreturn_t
3345bnx2_interrupt(int irq, void *dev_instance)
3346{
3347	struct bnx2_napi *bnapi = dev_instance;
3348	struct bnx2 *bp = bnapi->bp;
3349	struct status_block *sblk = bnapi->status_blk.msi;
3350
3351	/* When using INTx, it is possible for the interrupt to arrive
3352	 * at the CPU before the status block posted prior to the
3353	 * interrupt. Reading a register will flush the status block.
3354	 * When using MSI, the MSI message will always complete after
3355	 * the status block write.
3356	 */
3357	if ((sblk->status_idx == bnapi->last_status_idx) &&
3358	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3359	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3360		return IRQ_NONE;
3361
3362	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3363		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3364		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3365
3366	/* Read back to deassert IRQ immediately to avoid too many
3367	 * spurious interrupts.
3368	 */
3369	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3370
3371	/* Return here if interrupt is shared and is disabled. */
3372	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3373		return IRQ_HANDLED;
3374
3375	if (napi_schedule_prep(&bnapi->napi)) {
3376		bnapi->last_status_idx = sblk->status_idx;
3377		__napi_schedule(&bnapi->napi);
3378	}
3379
3380	return IRQ_HANDLED;
3381}
3382
3383static inline int
3384bnx2_has_fast_work(struct bnx2_napi *bnapi)
3385{
3386	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3387	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3388
3389	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3390	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3391		return 1;
3392	return 0;
3393}
3394
3395#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3396				 STATUS_ATTN_BITS_TIMER_ABORT)
3397
3398static inline int
3399bnx2_has_work(struct bnx2_napi *bnapi)
3400{
3401	struct status_block *sblk = bnapi->status_blk.msi;
3402
3403	if (bnx2_has_fast_work(bnapi))
3404		return 1;
3405
3406#ifdef BCM_CNIC
3407	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3408		return 1;
3409#endif
3410
3411	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3412	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3413		return 1;
3414
3415	return 0;
3416}
3417
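/* Periodic check for a missed MSI: if work is pending but the status
 * index has not moved since the last idle check, bounce the MSI enable
 * bit and invoke the interrupt handler directly.
 */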
3418static void
3419bnx2_chk_missed_msi(struct bnx2 *bp)
3420{
3421	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3422	u32 msi_ctrl;
3423
3424	if (bnx2_has_work(bnapi)) {
3425		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3426		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3427			return;
3428
3429		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3430			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3431				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3432			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3433			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3434		}
3435	}
3436
3437	bp->idle_chk_status_idx = bnapi->last_status_idx;
3438}
3439
3440#ifdef BCM_CNIC
3441static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3442{
3443	struct cnic_ops *c_ops;
3444
3445	if (!bnapi->cnic_present)
3446		return;
3447
3448	rcu_read_lock();
3449	c_ops = rcu_dereference(bp->cnic_ops);
3450	if (c_ops)
3451		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3452						      bnapi->status_blk.msi);
3453	rcu_read_unlock();
3454}
3455#endif
3456
3457static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3458{
3459	struct status_block *sblk = bnapi->status_blk.msi;
3460	u32 status_attn_bits = sblk->status_attn_bits;
3461	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3462
3463	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3464	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3465
3466		bnx2_phy_int(bp, bnapi);
3467
3468		/* This is needed to take care of transient status
3469		 * during link changes.
3470		 */
3471		BNX2_WR(bp, BNX2_HC_COMMAND,
3472			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3473		BNX2_RD(bp, BNX2_HC_COMMAND);
3474	}
3475}
3476
3477static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3478			  int work_done, int budget)
3479{
3480	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3481	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3482
3483	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3484		bnx2_tx_int(bp, bnapi, 0);
3485
3486	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3487		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3488
3489	return work_done;
3490}
3491
3492static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3493{
3494	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3495	struct bnx2 *bp = bnapi->bp;
3496	int work_done = 0;
3497	struct status_block_msix *sblk = bnapi->status_blk.msix;
3498
3499	while (1) {
3500		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3501		if (unlikely(work_done >= budget))
3502			break;
3503
3504		bnapi->last_status_idx = sblk->status_idx;
3505		/* status idx must be read before checking for more work. */
3506		rmb();
3507		if (likely(!bnx2_has_fast_work(bnapi))) {
3508
3509			napi_complete_done(napi, work_done);
3510			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3511				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3512				bnapi->last_status_idx);
3513			break;
3514		}
3515	}
3516	return work_done;
3517}
3518
3519static int bnx2_poll(struct napi_struct *napi, int budget)
3520{
3521	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3522	struct bnx2 *bp = bnapi->bp;
3523	int work_done = 0;
3524	struct status_block *sblk = bnapi->status_blk.msi;
3525
3526	while (1) {
3527		bnx2_poll_link(bp, bnapi);
3528
3529		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3530
3531#ifdef BCM_CNIC
3532		bnx2_poll_cnic(bp, bnapi);
3533#endif
3534
3535		/* bnapi->last_status_idx is used below to tell the hw how
3536		 * much work has been processed, so we must read it before
3537		 * checking for more work.
3538		 */
3539		bnapi->last_status_idx = sblk->status_idx;
3540
3541		if (unlikely(work_done >= budget))
3542			break;
3543
3544		rmb();
3545		if (likely(!bnx2_has_work(bnapi))) {
3546			napi_complete_done(napi, work_done);
3547			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3548				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3549					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3550					bnapi->last_status_idx);
3551				break;
3552			}
3553			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3554				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3555				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3556				bnapi->last_status_idx);
3557
3558			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3559				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3560				bnapi->last_status_idx);
3561			break;
3562		}
3563	}
3564
3565	return work_done;
3566}
3567
3568/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3569 * from set_multicast.
3570 */
3571static void
3572bnx2_set_rx_mode(struct net_device *dev)
3573{
3574	struct bnx2 *bp = netdev_priv(dev);
3575	u32 rx_mode, sort_mode;
3576	struct netdev_hw_addr *ha;
3577	int i;
3578
3579	if (!netif_running(dev))
3580		return;
3581
3582	spin_lock_bh(&bp->phy_lock);
3583
3584	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3585				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3586	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3587	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3588	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3589		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3590	if (dev->flags & IFF_PROMISC) {
3591		/* Promiscuous mode. */
3592		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3593		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3594			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3595	}
3596	else if (dev->flags & IFF_ALLMULTI) {
3597		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3598			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3599				0xffffffff);
3600		}
3601		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3602	}
3603	else {
3604		/* Accept one or more multicast(s). */
3605		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3606		u32 regidx;
3607		u32 bit;
3608		u32 crc;
3609
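		/* The multicast filter is a 256-bit hash: bits 7:5 of the
		 * low CRC byte select one of eight 32-bit registers and
		 * bits 4:0 select the bit within it.
		 */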
3610		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3611
3612		netdev_for_each_mc_addr(ha, dev) {
3613			crc = ether_crc_le(ETH_ALEN, ha->addr);
3614			bit = crc & 0xff;
3615			regidx = (bit & 0xe0) >> 5;
3616			bit &= 0x1f;
3617			mc_filter[regidx] |= (1 << bit);
3618		}
3619
3620		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3621			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3622				mc_filter[i]);
3623		}
3624
3625		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3626	}
3627
3628	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3629		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3630		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3631			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3632	} else if (!(dev->flags & IFF_PROMISC)) {
3633		/* Add all entries into the match filter list */
3634		i = 0;
3635		netdev_for_each_uc_addr(ha, dev) {
3636			bnx2_set_mac_addr(bp, ha->addr,
3637					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3638			sort_mode |= (1 <<
3639				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3640			i++;
3641		}
3642
3643	}
3644
3645	if (rx_mode != bp->rx_mode) {
3646		bp->rx_mode = rx_mode;
3647		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3648	}
3649
3650	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3651	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3652	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3653
3654	spin_unlock_bh(&bp->phy_lock);
3655}
3656
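/* Sanity-check one section header of a firmware file: the section must
 * lie entirely within the blob, start on a 4-byte boundary and have a
 * length matching the required alignment.
 */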
3657static int
3658check_fw_section(const struct firmware *fw,
3659		 const struct bnx2_fw_file_section *section,
3660		 u32 alignment, bool non_empty)
3661{
3662	u32 offset = be32_to_cpu(section->offset);
3663	u32 len = be32_to_cpu(section->len);
3664
3665	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3666		return -EINVAL;
3667	if ((non_empty && len == 0) || len > fw->size - offset ||
3668	    len & (alignment - 1))
3669		return -EINVAL;
3670	return 0;
3671}
3672
3673static int
3674check_mips_fw_entry(const struct firmware *fw,
3675		    const struct bnx2_mips_fw_file_entry *entry)
3676{
3677	if (check_fw_section(fw, &entry->text, 4, true) ||
3678	    check_fw_section(fw, &entry->data, 4, false) ||
3679	    check_fw_section(fw, &entry->rodata, 4, false))
3680		return -EINVAL;
3681	return 0;
3682}
3683
3684static void bnx2_release_firmware(struct bnx2 *bp)
3685{
3686	if (bp->rv2p_firmware) {
3687		release_firmware(bp->mips_firmware);
3688		release_firmware(bp->rv2p_firmware);
3689		bp->rv2p_firmware = NULL;
3690	}
3691}
3692
3693static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3694{
3695	const char *mips_fw_file, *rv2p_fw_file;
3696	const struct bnx2_mips_fw_file *mips_fw;
3697	const struct bnx2_rv2p_fw_file *rv2p_fw;
3698	int rc;
3699
3700	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3701		mips_fw_file = FW_MIPS_FILE_09;
3702		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3703		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3704			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3705		else
3706			rv2p_fw_file = FW_RV2P_FILE_09;
3707	} else {
3708		mips_fw_file = FW_MIPS_FILE_06;
3709		rv2p_fw_file = FW_RV2P_FILE_06;
3710	}
3711
3712	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3713	if (rc) {
3714		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3715		goto out;
3716	}
3717
3718	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3719	if (rc) {
3720		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3721		goto err_release_mips_firmware;
3722	}
3723	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3724	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3725	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3726	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3727	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3728	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3729	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3730	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3731		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3732		rc = -EINVAL;
3733		goto err_release_firmware;
3734	}
3735	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3736	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3737	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3738		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3739		rc = -EINVAL;
3740		goto err_release_firmware;
3741	}
3742out:
3743	return rc;
3744
3745err_release_firmware:
3746	release_firmware(bp->rv2p_firmware);
3747	bp->rv2p_firmware = NULL;
3748err_release_mips_firmware:
3749	release_firmware(bp->mips_firmware);
3750	goto out;
3751}
3752
3753static int bnx2_request_firmware(struct bnx2 *bp)
3754{
3755	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3756}
3757
3758static u32
3759rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3760{
3761	switch (idx) {
3762	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3763		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3764		rv2p_code |= RV2P_BD_PAGE_SIZE;
3765		break;
3766	}
3767	return rv2p_code;
3768}
3769
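/* Download one RV2P processor image: each 64-bit instruction is staged
 * through the INSTR_HIGH/INSTR_LOW registers and committed with an
 * address/command write; the per-file fixup table is then applied
 * (e.g. patching the BD page size) and the processor is reset.
 */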
3770static int
3771load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3772	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3773{
3774	u32 rv2p_code_len, file_offset;
3775	__be32 *rv2p_code;
3776	int i;
3777	u32 val, cmd, addr;
3778
3779	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3780	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3781
3782	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3783
3784	if (rv2p_proc == RV2P_PROC1) {
3785		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3786		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3787	} else {
3788		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3789		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3790	}
3791
3792	for (i = 0; i < rv2p_code_len; i += 8) {
3793		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3794		rv2p_code++;
3795		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3796		rv2p_code++;
3797
3798		val = (i / 8) | cmd;
3799		BNX2_WR(bp, addr, val);
3800	}
3801
3802	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3803	for (i = 0; i < 8; i++) {
3804		u32 loc, code;
3805
3806		loc = be32_to_cpu(fw_entry->fixup[i]);
3807		if (loc && ((loc * 4) < rv2p_code_len)) {
3808			code = be32_to_cpu(*(rv2p_code + loc - 1));
3809			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3810			code = be32_to_cpu(*(rv2p_code + loc));
3811			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3812			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3813
3814			val = (loc / 2) | cmd;
3815			BNX2_WR(bp, addr, val);
3816		}
3817	}
3818
3819	/* Reset the processor, un-stall is done later. */
3820	if (rv2p_proc == RV2P_PROC1) {
3821		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3822	}
3823	else {
3824		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3825	}
3826
3827	return 0;
3828}
3829
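/* Load a MIPS processor image: halt the CPU, copy the text, data and
 * read-only sections into its scratchpad through the register window,
 * set the program counter to the entry point and restart it.
 */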
3830static void
3831load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3832	    const struct bnx2_mips_fw_file_entry *fw_entry)
3833{
3834	u32 addr, len, file_offset;
3835	__be32 *data;
3836	u32 offset;
3837	u32 val;
3838
3839	/* Halt the CPU. */
3840	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3841	val |= cpu_reg->mode_value_halt;
3842	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3843	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3844
3845	/* Load the Text area. */
3846	addr = be32_to_cpu(fw_entry->text.addr);
3847	len = be32_to_cpu(fw_entry->text.len);
3848	file_offset = be32_to_cpu(fw_entry->text.offset);
3849	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3850
3851	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3852	if (len) {
3853		int j;
3854
3855		for (j = 0; j < (len / 4); j++, offset += 4)
3856			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3857	}
3858
3859	/* Load the Data area. */
3860	addr = be32_to_cpu(fw_entry->data.addr);
3861	len = be32_to_cpu(fw_entry->data.len);
3862	file_offset = be32_to_cpu(fw_entry->data.offset);
3863	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3864
3865	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3866	if (len) {
3867		int j;
3868
3869		for (j = 0; j < (len / 4); j++, offset += 4)
3870			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3871	}
3872
3873	/* Load the Read-Only area. */
3874	addr = be32_to_cpu(fw_entry->rodata.addr);
3875	len = be32_to_cpu(fw_entry->rodata.len);
3876	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3877	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3878
3879	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3880	if (len) {
3881		int j;
3882
3883		for (j = 0; j < (len / 4); j++, offset += 4)
3884			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3885	}
3886
3887	/* Clear the pre-fetch instruction. */
3888	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3889
3890	val = be32_to_cpu(fw_entry->start_addr);
3891	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3892
3893	/* Start the CPU. */
3894	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3895	val &= ~cpu_reg->mode_value_halt;
3896	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3897	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3898}
3899
3900static void
3901bnx2_init_cpus(struct bnx2 *bp)
3902{
3903	const struct bnx2_mips_fw_file *mips_fw =
3904		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3905	const struct bnx2_rv2p_fw_file *rv2p_fw =
3906		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3907
3908	/* Initialize the RV2P processor. */
3909	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3910	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3911
3912	/* Initialize the RX Processor. */
3913	load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3914
3915	/* Initialize the TX Processor. */
3916	load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3917
3918	/* Initialize the TX Patch-up Processor. */
3919	load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3920
3921	/* Initialize the Completion Processor. */
3922	load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3923
3924	/* Initialize the Command Processor. */
3925	load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3926}
3927
3928static void
3929bnx2_setup_wol(struct bnx2 *bp)
3930{
3931	int i;
3932	u32 val, wol_msg;
3933
3934	if (bp->wol) {
3935		u32 advertising;
3936		u8 autoneg;
3937
3938		autoneg = bp->autoneg;
3939		advertising = bp->advertising;
3940
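		/* Advertise only 10/100 on copper so the link renegotiates
		 * down while WOL is armed (presumably to match low-power
		 * operation; inferred from the restricted advertisement).
		 */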
3941		if (bp->phy_port == PORT_TP) {
3942			bp->autoneg = AUTONEG_SPEED;
3943			bp->advertising = ADVERTISED_10baseT_Half |
3944				ADVERTISED_10baseT_Full |
3945				ADVERTISED_100baseT_Half |
3946				ADVERTISED_100baseT_Full |
3947				ADVERTISED_Autoneg;
3948		}
3949
3950		spin_lock_bh(&bp->phy_lock);
3951		bnx2_setup_phy(bp, bp->phy_port);
3952		spin_unlock_bh(&bp->phy_lock);
3953
3954		bp->autoneg = autoneg;
3955		bp->advertising = advertising;
3956
3957		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3958
3959		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3960
3961		/* Enable port mode. */
3962		val &= ~BNX2_EMAC_MODE_PORT;
3963		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3964		       BNX2_EMAC_MODE_ACPI_RCVD |
3965		       BNX2_EMAC_MODE_MPKT;
3966		if (bp->phy_port == PORT_TP) {
3967			val |= BNX2_EMAC_MODE_PORT_MII;
3968		} else {
3969			val |= BNX2_EMAC_MODE_PORT_GMII;
3970			if (bp->line_speed == SPEED_2500)
3971				val |= BNX2_EMAC_MODE_25G_MODE;
3972		}
3973
3974		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3975
3976		/* receive all multicast */
3977		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3978			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3979				0xffffffff);
3980		}
3981		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3982
3983		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
3984		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3985		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3986		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3987
3988		/* Need to enable EMAC and RPM for WOL. */
3989		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3990			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3991			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3992			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3993
3994		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3995		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3996		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3997
3998		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3999	} else {
4000		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4001	}
4002
4003	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4004		u32 val;
4005
4006		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4007		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4008			bnx2_fw_sync(bp, wol_msg, 1, 0);
4009			return;
4010		}
4011		/* Tell firmware not to power down the PHY yet, otherwise
4012		 * the chip will take a long time to respond to MMIO reads.
4013		 */
4014		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4015		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4016			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4017		bnx2_fw_sync(bp, wol_msg, 1, 0);
4018		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4019	}
4020
4021}
4022
4023static int
4024bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4025{
4026	switch (state) {
4027	case PCI_D0: {
4028		u32 val;
4029
4030		pci_enable_wake(bp->pdev, PCI_D0, false);
4031		pci_set_power_state(bp->pdev, PCI_D0);
4032
4033		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4034		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4035		val &= ~BNX2_EMAC_MODE_MPKT;
4036		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4037
4038		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4039		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4040		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4041		break;
4042	}
4043	case PCI_D3hot: {
4044		bnx2_setup_wol(bp);
4045		pci_wake_from_d3(bp->pdev, bp->wol);
4046		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4047		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4048
4049			if (bp->wol)
4050				pci_set_power_state(bp->pdev, PCI_D3hot);
4051			break;
4052
4053		}
4054		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4055			u32 val;
4056
4057			/* Tell firmware not to power down the PHY yet,
4058			 * otherwise the other port may not respond to
4059			 * MMIO reads.
4060			 */
4061			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4062			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4063			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4064			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4065		}
4066		pci_set_power_state(bp->pdev, PCI_D3hot);
4067
4068		/* No more memory access after this point until
4069		 * device is brought back to D0.
4070		 */
4071		break;
4072	}
4073	default:
4074		return -EINVAL;
4075	}
4076	return 0;
4077}
4078
4079static int
4080bnx2_acquire_nvram_lock(struct bnx2 *bp)
4081{
4082	u32 val;
4083	int j;
4084
4085	/* Request access to the flash interface. */
4086	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4087	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4089		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4090			break;
4091
4092		udelay(5);
4093	}
4094
4095	if (j >= NVRAM_TIMEOUT_COUNT)
4096		return -EBUSY;
4097
4098	return 0;
4099}
4100
4101static int
4102bnx2_release_nvram_lock(struct bnx2 *bp)
4103{
4104	int j;
4105	u32 val;
4106
4107	/* Relinquish nvram interface. */
4108	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4109
4110	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4111		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4112		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4113			break;
4114
4115		udelay(5);
4116	}
4117
4118	if (j >= NVRAM_TIMEOUT_COUNT)
4119		return -EBUSY;
4120
4121	return 0;
4122}
4123
4125static int
4126bnx2_enable_nvram_write(struct bnx2 *bp)
4127{
4128	u32 val;
4129
4130	val = BNX2_RD(bp, BNX2_MISC_CFG);
4131	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4132
4133	if (bp->flash_info->flags & BNX2_NV_WREN) {
4134		int j;
4135
4136		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4137		BNX2_WR(bp, BNX2_NVM_COMMAND,
4138			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4139
4140		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4141			udelay(5);
4142
4143			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4144			if (val & BNX2_NVM_COMMAND_DONE)
4145				break;
4146		}
4147
4148		if (j >= NVRAM_TIMEOUT_COUNT)
4149			return -EBUSY;
4150	}
4151	return 0;
4152}
4153
4154static void
4155bnx2_disable_nvram_write(struct bnx2 *bp)
4156{
4157	u32 val;
4158
4159	val = BNX2_RD(bp, BNX2_MISC_CFG);
4160	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4161}
4162
4164static void
4165bnx2_enable_nvram_access(struct bnx2 *bp)
4166{
4167	u32 val;
4168
4169	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4170	/* Enable both bits, even on read. */
4171	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4172		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4173}
4174
4175static void
4176bnx2_disable_nvram_access(struct bnx2 *bp)
4177{
4178	u32 val;
4179
4180	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4181	/* Disable both bits, even after read. */
4182	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4183		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4184			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4185}
4186
4187static int
4188bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4189{
4190	u32 cmd;
4191	int j;
4192
4193	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4194		/* Buffered flash, no erase needed */
4195		return 0;
4196
4197	/* Build an erase command */
4198	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4199	      BNX2_NVM_COMMAND_DOIT;
4200
4201	/* Need to clear DONE bit separately. */
4202	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4203
4204	/* Address of the NVRAM sector to erase. */
4205	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4206
4207	/* Issue an erase command. */
4208	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4209
4210	/* Wait for completion. */
4211	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4212		u32 val;
4213
4214		udelay(5);
4215
4216		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4217		if (val & BNX2_NVM_COMMAND_DONE)
4218			break;
4219	}
4220
4221	if (j >= NVRAM_TIMEOUT_COUNT)
4222		return -EBUSY;
4223
4224	return 0;
4225}
4226
4227static int
4228bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4229{
4230	u32 cmd;
4231	int j;
4232
4233	/* Build the command word. */
4234	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4235
4236	/* Calculate an offset of a buffered flash, not needed for 5709. */
4237	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238		offset = ((offset / bp->flash_info->page_size) <<
4239			   bp->flash_info->page_bits) +
4240			  (offset % bp->flash_info->page_size);
4241	}
4242
4243	/* Need to clear DONE bit separately. */
4244	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4245
4246	/* Address of the NVRAM to read from. */
4247	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4248
4249	/* Issue a read command. */
4250	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4251
4252	/* Wait for completion. */
4253	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4254		u32 val;
4255
4256		udelay(5);
4257
4258		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4259		if (val & BNX2_NVM_COMMAND_DONE) {
4260			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4261			memcpy(ret_val, &v, 4);
4262			break;
4263		}
4264	}
4265	if (j >= NVRAM_TIMEOUT_COUNT)
4266		return -EBUSY;
4267
4268	return 0;
4269}
4270
4272static int
4273bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4274{
4275	u32 cmd;
4276	__be32 val32;
4277	int j;
4278
4279	/* Build the command word. */
4280	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4281
4282	/* Calculate an offset of a buffered flash, not needed for 5709. */
4283	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4284		offset = ((offset / bp->flash_info->page_size) <<
4285			  bp->flash_info->page_bits) +
4286			 (offset % bp->flash_info->page_size);
4287	}
4288
4289	/* Need to clear DONE bit separately. */
4290	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4291
4292	memcpy(&val32, val, 4);
4293
4294	/* Write the data. */
4295	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4296
4297	/* Address of the NVRAM to write to. */
4298	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4299
4300	/* Issue the write command. */
4301	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4302
4303	/* Wait for completion. */
4304	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4305		udelay(5);
4306
4307		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4308			break;
4309	}
4310	if (j >= NVRAM_TIMEOUT_COUNT)
4311		return -EBUSY;
4312
4313	return 0;
4314}
4315
4316static int
4317bnx2_init_nvram(struct bnx2 *bp)
4318{
4319	u32 val;
4320	int j, entry_count, rc = 0;
4321	const struct flash_spec *flash;
4322
4323	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4324		bp->flash_info = &flash_5709;
4325		goto get_flash_size;
4326	}
4327
4328	/* Determine the selected interface. */
4329	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4330
4331	entry_count = ARRAY_SIZE(flash_table);
4332
4333	if (val & 0x40000000) {
4334
4335		/* Flash interface has been reconfigured */
4336		for (j = 0, flash = &flash_table[0]; j < entry_count;
4337		     j++, flash++) {
4338			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4339			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4340				bp->flash_info = flash;
4341				break;
4342			}
4343		}
4344	} else {
4346		u32 mask;
4347		/* Flash interface has not been reconfigured yet */
4348
4349		if (val & (1 << 23))
4350			mask = FLASH_BACKUP_STRAP_MASK;
4351		else
4352			mask = FLASH_STRAP_MASK;
4353
4354		for (j = 0, flash = &flash_table[0]; j < entry_count;
4355			j++, flash++) {
4356
4357			if ((val & mask) == (flash->strapping & mask)) {
4358				bp->flash_info = flash;
4359
4360				/* Request access to the flash interface. */
4361				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4362					return rc;
4363
4364				/* Enable access to flash interface */
4365				bnx2_enable_nvram_access(bp);
4366
4367				/* Reconfigure the flash interface */
4368				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4369				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4370				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4371				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4372
4373				/* Disable access to flash interface */
4374				bnx2_disable_nvram_access(bp);
4375				bnx2_release_nvram_lock(bp);
4376
4377				break;
4378			}
4379		}
4380	} /* if (val & 0x40000000) */
4381
4382	if (j == entry_count) {
4383		bp->flash_info = NULL;
4384		pr_alert("Unknown flash/EEPROM type\n");
4385		return -ENODEV;
4386	}
4387
4388get_flash_size:
4389	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4390	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4391	if (val)
4392		bp->flash_size = val;
4393	else
4394		bp->flash_size = bp->flash_info->total_size;
4395
4396	return rc;
4397}
4398
4399static int
4400bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4401		int buf_size)
4402{
4403	int rc = 0;
4404	u32 cmd_flags, offset32, len32, extra;
4405
4406	if (buf_size == 0)
4407		return 0;
4408
4409	/* Request access to the flash interface. */
4410	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4411		return rc;
4412
4413	/* Enable access to flash interface */
4414	bnx2_enable_nvram_access(bp);
4415
4416	len32 = buf_size;
4417	offset32 = offset;
4418	extra = 0;
4419
4420	cmd_flags = 0;
4421
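	/* An unaligned start: read the whole containing dword and copy
	 * out only the bytes that were requested.
	 */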
4422	if (offset32 & 3) {
4423		u8 buf[4];
4424		u32 pre_len;
4425
4426		offset32 &= ~3;
4427		pre_len = 4 - (offset & 3);
4428
4429		if (pre_len >= len32) {
4430			pre_len = len32;
4431			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4432				    BNX2_NVM_COMMAND_LAST;
4433		} else {
4435			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4436		}
4437
4438		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4439
4440		if (rc)
4441			return rc;
4442
4443		memcpy(ret_buf, buf + (offset & 3), pre_len);
4444
4445		offset32 += 4;
4446		ret_buf += pre_len;
4447		len32 -= pre_len;
4448	}
4449	if (len32 & 3) {
4450		extra = 4 - (len32 & 3);
4451		len32 = (len32 + 4) & ~3;
4452	}
4453
4454	if (len32 == 4) {
4455		u8 buf[4];
4456
4457		if (cmd_flags)
4458			cmd_flags = BNX2_NVM_COMMAND_LAST;
4459		else
4460			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4461				    BNX2_NVM_COMMAND_LAST;
4462
4463		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4464
4465		memcpy(ret_buf, buf, 4 - extra);
4466	} else if (len32 > 0) {
4468		u8 buf[4];
4469
4470		/* Read the first word. */
4471		if (cmd_flags)
4472			cmd_flags = 0;
4473		else
4474			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4475
4476		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4477
4478		/* Advance to the next dword. */
4479		offset32 += 4;
4480		ret_buf += 4;
4481		len32 -= 4;
4482
4483		while (len32 > 4 && rc == 0) {
4484			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4485
4486			/* Advance to the next dword. */
4487			offset32 += 4;
4488			ret_buf += 4;
4489			len32 -= 4;
4490		}
4491
4492		if (rc)
4493			return rc;
4494
4495		cmd_flags = BNX2_NVM_COMMAND_LAST;
4496		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4497
4498		memcpy(ret_buf, buf, 4 - extra);
4499	}
4500
4501	/* Disable access to flash interface */
4502	bnx2_disable_nvram_access(bp);
4503
4504	bnx2_release_nvram_lock(bp);
4505
4506	return rc;
4507}
4508
4509static int
4510bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4511		int buf_size)
4512{
4513	u32 written, offset32, len32;
4514	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4515	int rc = 0;
4516	int align_start, align_end;
4517
4518	buf = data_buf;
4519	offset32 = offset;
4520	len32 = buf_size;
4521	align_start = align_end = 0;
4522
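	/* Widen the write to dword boundaries, preserving the existing
	 * flash contents at the unaligned head and tail.
	 */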
4523	if ((align_start = (offset32 & 3))) {
4524		offset32 &= ~3;
4525		len32 += align_start;
4526		if (len32 < 4)
4527			len32 = 4;
4528		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4529			return rc;
4530	}
4531
4532	if (len32 & 3) {
4533		align_end = 4 - (len32 & 3);
4534		len32 += align_end;
4535		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4536			return rc;
4537	}
4538
4539	if (align_start || align_end) {
4540		align_buf = kmalloc(len32, GFP_KERNEL);
4541		if (!align_buf)
4542			return -ENOMEM;
4543		if (align_start) {
4544			memcpy(align_buf, start, 4);
4545		}
4546		if (align_end) {
4547			memcpy(align_buf + len32 - 4, end, 4);
4548		}
4549		memcpy(align_buf + align_start, data_buf, buf_size);
4550		buf = align_buf;
4551	}
4552
4553	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4554		flash_buffer = kmalloc(264, GFP_KERNEL);
4555		if (!flash_buffer) {
4556			rc = -ENOMEM;
4557			goto nvram_write_end;
4558		}
4559	}
4560
4561	written = 0;
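	/* Write one flash page per iteration.  For unbuffered flash the
	 * whole page is read, erased, and rewritten around the new data.
	 */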
4562	while ((written < len32) && (rc == 0)) {
4563		u32 page_start, page_end, data_start, data_end;
4564		u32 addr, cmd_flags;
4565		int i;
4566
4567		/* Find the page_start addr */
4568		page_start = offset32 + written;
4569		page_start -= (page_start % bp->flash_info->page_size);
4570		/* Find the page_end addr */
4571		page_end = page_start + bp->flash_info->page_size;
4572		/* Find the data_start addr */
4573		data_start = (written == 0) ? offset32 : page_start;
4574		/* Find the data_end addr */
4575		data_end = (page_end > offset32 + len32) ?
4576			(offset32 + len32) : page_end;
4577
4578		/* Request access to the flash interface. */
4579		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4580			goto nvram_write_end;
4581
4582		/* Enable access to flash interface */
4583		bnx2_enable_nvram_access(bp);
4584
4585		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4586		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4587			int j;
4588
4589			/* Read the whole page into the buffer
4590			 * (non-buffered flash only) */
4591			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4592				if (j == (bp->flash_info->page_size - 4)) {
4593					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4594				}
4595				rc = bnx2_nvram_read_dword(bp,
4596					page_start + j,
4597					&flash_buffer[j],
4598					cmd_flags);
4599
4600				if (rc)
4601					goto nvram_write_end;
4602
4603				cmd_flags = 0;
4604			}
4605		}
4606
4607		/* Enable writes to flash interface (unlock write-protect) */
4608		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4609			goto nvram_write_end;
4610
4611		/* Loop to write back the buffer data from page_start to
4612		 * data_start */
4613		i = 0;
4614		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4615			/* Erase the page */
4616			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4617				goto nvram_write_end;
4618
4619			/* Re-enable the write again for the actual write */
4620			bnx2_enable_nvram_write(bp);
4621
4622			for (addr = page_start; addr < data_start;
4623				addr += 4, i += 4) {
4624
4625				rc = bnx2_nvram_write_dword(bp, addr,
4626					&flash_buffer[i], cmd_flags);
4627
4628				if (rc != 0)
4629					goto nvram_write_end;
4630
4631				cmd_flags = 0;
4632			}
4633		}
4634
4635		/* Loop to write the new data from data_start to data_end */
4636		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4637			if ((addr == page_end - 4) ||
4638				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4639				 (addr == data_end - 4))) {
4640
4641				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4642			}
4643			rc = bnx2_nvram_write_dword(bp, addr, buf,
4644				cmd_flags);
4645
4646			if (rc != 0)
4647				goto nvram_write_end;
4648
4649			cmd_flags = 0;
4650			buf += 4;
4651		}
4652
4653		/* Loop to write back the buffer data from data_end
4654		 * to page_end */
4655		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4656			for (addr = data_end; addr < page_end;
4657				addr += 4, i += 4) {
4658
4659				if (addr == page_end-4) {
4660					cmd_flags = BNX2_NVM_COMMAND_LAST;
4661				}
4662				rc = bnx2_nvram_write_dword(bp, addr,
4663					&flash_buffer[i], cmd_flags);
4664
4665				if (rc != 0)
4666					goto nvram_write_end;
4667
4668				cmd_flags = 0;
4669			}
4670		}
4671
4672		/* Disable writes to flash interface (lock write-protect) */
4673		bnx2_disable_nvram_write(bp);
4674
4675		/* Disable access to flash interface */
4676		bnx2_disable_nvram_access(bp);
4677		bnx2_release_nvram_lock(bp);
4678
4679		/* Increment written */
4680		written += data_end - data_start;
4681	}
4682
4683nvram_write_end:
4684	kfree(flash_buffer);
4685	kfree(align_buf);
4686	return rc;
4687}
4688
4689static void
4690bnx2_init_fw_cap(struct bnx2 *bp)
4691{
4692	u32 val, sig = 0;
4693
4694	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4695	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4696
4697	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4698		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4699
4700	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4701	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4702		return;
4703
4704	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4705		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4706		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4707	}
4708
4709	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4710	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4711		u32 link;
4712
4713		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4714
4715		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4716		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4717			bp->phy_port = PORT_FIBRE;
4718		else
4719			bp->phy_port = PORT_TP;
4720
4721		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4722		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4723	}
4724
4725	if (netif_running(bp->dev) && sig)
4726		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4727}
4728
4729static void
4730bnx2_setup_msix_tbl(struct bnx2 *bp)
4731{
4732	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4733
4734	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4735	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4736}
4737
4738static void
4739bnx2_wait_dma_complete(struct bnx2 *bp)
4740{
4741	u32 val;
4742	int i;
4743
4744	/*
4745	 * Wait for the current PCI transaction to complete before
4746	 * issuing a reset.
4747	 */
4748	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4749	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4750		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4751			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4752			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4753			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4754			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4755		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4756		udelay(5);
4757	} else {  /* 5709 */
4758		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4759		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4760		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4761		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4762
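		/* Poll up to 100 ms for outstanding PCIe transactions
		 * to complete.
		 */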
4763		for (i = 0; i < 100; i++) {
4764			msleep(1);
4765			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4766			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4767				break;
4768		}
4769	}
4772}
4773
4775static int
4776bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4777{
4778	u32 val;
4779	int i, rc = 0;
4780	u8 old_port;
4781
4782	/* Wait for the current PCI transaction to complete before
4783	 * issuing a reset. */
4784	bnx2_wait_dma_complete(bp);
4785
4786	/* Wait for the firmware to tell us it is ok to issue a reset. */
4787	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4788
4789	/* Deposit a driver reset signature so the firmware knows that
4790	 * this is a soft reset. */
4791	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4792		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4793
4794	/* Do a dummy read to force the chip to complete all current
4795	 * transactions before we issue a reset. */
4796	val = BNX2_RD(bp, BNX2_MISC_ID);
4797
4798	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4799		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4800		BNX2_RD(bp, BNX2_MISC_COMMAND);
4801		udelay(5);
4802
4803		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4804		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4805
4806		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4807
4808	} else {
4809		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4810		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4811		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4812
4813		/* Chip reset. */
4814		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4815
4816		/* Reading back any register after chip reset will hang the
4817		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4818		 * of margin for write posting.
4819		 */
4820		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4821		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4822			msleep(20);
4823
4824		/* Reset takes approximately 30 usec */
4825		for (i = 0; i < 10; i++) {
4826			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4827			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4828				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4829				break;
4830			udelay(10);
4831		}
4832
4833		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4834			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4835			pr_err("Chip reset did not complete\n");
4836			return -EBUSY;
4837		}
4838	}
4839
4840	/* Make sure byte swapping is properly configured. */
4841	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4842	if (val != 0x01020304) {
4843		pr_err("Chip not in correct endian mode\n");
4844		return -ENODEV;
4845	}
4846
4847	/* Wait for the firmware to finish its initialization. */
4848	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4849	if (rc)
4850		return rc;
4851
4852	spin_lock_bh(&bp->phy_lock);
4853	old_port = bp->phy_port;
4854	bnx2_init_fw_cap(bp);
4855	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4856	    old_port != bp->phy_port)
4857		bnx2_set_default_remote_link(bp);
4858	spin_unlock_bh(&bp->phy_lock);
4859
4860	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4861		/* Adjust the voltage regulator two steps lower.  The default
4862		 * value of this register is 0x0000000e. */
4863		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4864
4865		/* Remove bad rbuf memory from the free pool. */
4866		rc = bnx2_alloc_bad_rbuf(bp);
4867	}
4868
4869	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4870		bnx2_setup_msix_tbl(bp);
4871		/* Prevent MSIX table reads and writes from timing out */
4872		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4873			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4874	}
4875
4876	return rc;
4877}
4878
4879static int
4880bnx2_init_chip(struct bnx2 *bp)
4881{
4882	u32 val, mtu;
4883	int rc, i;
4884
4885	/* Make sure the interrupt is not active. */
4886	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4887
4888	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4889	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4890#ifdef __BIG_ENDIAN
4891	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4892#endif
4893	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4894	      DMA_READ_CHANS << 12 |
4895	      DMA_WRITE_CHANS << 16;
4896
4897	val |= (0x2 << 20) | (1 << 11);
4898
4899	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4900		val |= (1 << 23);
4901
4902	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4903	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4904	    !(bp->flags & BNX2_FLAG_PCIX))
4905		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4906
4907	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4908
4909	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4910		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4911		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4912		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4913	}
4914
4915	if (bp->flags & BNX2_FLAG_PCIX) {
4916		u16 val16;
4917
4918		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4919				     &val16);
4920		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4921				      val16 & ~PCI_X_CMD_ERO);
4922	}
4923
4924	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4925		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4926		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4927		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4928
4929	/* Initialize context mapping and zero out the quick contexts.  The
4930	 * context block must have already been enabled. */
4931	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4932		rc = bnx2_init_5709_context(bp);
4933		if (rc)
4934			return rc;
4935	} else
4936		bnx2_init_context(bp);
4937
4938	bnx2_init_cpus(bp);
4939
4940	bnx2_init_nvram(bp);
4941
4942	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4943
4944	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4945	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4946	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4947	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4948		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4949		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4950			val |= BNX2_MQ_CONFIG_HALT_DIS;
4951	}
4952
4953	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4954
4955	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4956	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4957	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4958
4959	val = (BNX2_PAGE_BITS - 8) << 24;
4960	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4961
4962	/* Configure page size. */
4963	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4964	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4965	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4966	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4967
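	/* Seed the EMAC transmit backoff generator from the MAC address. */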
4968	val = bp->mac_addr[0] +
4969	      (bp->mac_addr[1] << 8) +
4970	      (bp->mac_addr[2] << 16) +
4971	      bp->mac_addr[3] +
4972	      (bp->mac_addr[4] << 8) +
4973	      (bp->mac_addr[5] << 16);
4974	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4975
4976	/* Program the MTU.  Also include 4 bytes for CRC32. */
4977	mtu = bp->dev->mtu;
4978	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4979	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4980		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4981	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4982
4983	if (mtu < ETH_DATA_LEN)
4984		mtu = ETH_DATA_LEN;
4985
4986	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4987	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4988	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4989
4990	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4991	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4992		bp->bnx2_napi[i].last_status_idx = 0;
4993
4994	bp->idle_chk_status_idx = 0xffff;
4995
4996	/* Set up how to generate a link change interrupt. */
4997	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4998
4999	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5000		(u64) bp->status_blk_mapping & 0xffffffff);
5001	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5002
5003	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5004		(u64) bp->stats_blk_mapping & 0xffffffff);
5005	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5006		(u64) bp->stats_blk_mapping >> 32);
5007
5008	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5009		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5010
5011	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5012		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5013
5014	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5015		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5016
5017	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5018
5019	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5020
5021	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5022		(bp->com_ticks_int << 16) | bp->com_ticks);
5023
5024	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5025		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5026
5027	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5028		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5029	else
5030		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5031	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5032
5033	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) {
5034		val = BNX2_HC_CONFIG_COLLECT_STATS;
5035	} else {
5036		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5037		      BNX2_HC_CONFIG_COLLECT_STATS;
5038	}
5039
5040	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5041		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5042			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5043
5044		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5045	}
5046
5047	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5048		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5049
5050	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5051
5052	if (bp->rx_ticks < 25)
5053		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5054	else
5055		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5056
5057	for (i = 1; i < bp->irq_nvecs; i++) {
5058		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5059			   BNX2_HC_SB_CONFIG_1;
5060
5061		BNX2_WR(bp, base,
5062			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5063			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5064			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5065
5066		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5067			(bp->tx_quick_cons_trip_int << 16) |
5068			 bp->tx_quick_cons_trip);
5069
5070		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5071			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5072
5073		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5074			(bp->rx_quick_cons_trip_int << 16) |
5075			bp->rx_quick_cons_trip);
5076
5077		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5078			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5079	}
5080
5081	/* Clear internal stats counters. */
5082	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5083
5084	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5085
5086	/* Initialize the receive filter. */
5087	bnx2_set_rx_mode(bp->dev);
5088
5089	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5090		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5091		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5092		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5093	}
5094	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5095			  1, 0);
5096
5097	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5098	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5099
5100	udelay(20);
5101
5102	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5103
5104	return rc;
5105}
5106
5107static void
5108bnx2_clear_ring_states(struct bnx2 *bp)
5109{
5110	struct bnx2_napi *bnapi;
5111	struct bnx2_tx_ring_info *txr;
5112	struct bnx2_rx_ring_info *rxr;
5113	int i;
5114
5115	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5116		bnapi = &bp->bnx2_napi[i];
5117		txr = &bnapi->tx_ring;
5118		rxr = &bnapi->rx_ring;
5119
5120		txr->tx_cons = 0;
5121		txr->hw_tx_cons = 0;
5122		rxr->rx_prod_bseq = 0;
5123		rxr->rx_prod = 0;
5124		rxr->rx_cons = 0;
5125		rxr->rx_pg_prod = 0;
5126		rxr->rx_pg_cons = 0;
5127	}
5128}
5129
5130static void
5131bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5132{
5133	u32 val, offset0, offset1, offset2, offset3;
5134	u32 cid_addr = GET_CID_ADDR(cid);
5135
5136	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5137		offset0 = BNX2_L2CTX_TYPE_XI;
5138		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5139		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5140		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5141	} else {
5142		offset0 = BNX2_L2CTX_TYPE;
5143		offset1 = BNX2_L2CTX_CMD_TYPE;
5144		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5145		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5146	}
5147	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5148	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5149
5150	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5151	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5152
5153	val = (u64) txr->tx_desc_mapping >> 32;
5154	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5155
5156	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5157	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5158}
5159
5160static void
5161bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5162{
5163	struct bnx2_tx_bd *txbd;
5164	u32 cid = TX_CID;
5165	struct bnx2_napi *bnapi;
5166	struct bnx2_tx_ring_info *txr;
5167
5168	bnapi = &bp->bnx2_napi[ring_num];
5169	txr = &bnapi->tx_ring;
5170
5171	if (ring_num == 0)
5172		cid = TX_CID;
5173	else
5174		cid = TX_TSS_CID + ring_num - 1;
5175
5176	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5177
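	/* The final BD is a chain pointer back to the start of the ring. */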
5178	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5179
5180	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5181	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5182
5183	txr->tx_prod = 0;
5184	txr->tx_prod_bseq = 0;
5185
5186	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5187	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5188
5189	bnx2_init_tx_context(bp, cid, txr);
5190}
5191
5192static void
5193bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5194		     u32 buf_size, int num_rings)
5195{
5196	int i;
5197	struct bnx2_rx_bd *rxbd;
5198
5199	for (i = 0; i < num_rings; i++) {
5200		int j;
5201
5202		rxbd = &rx_ring[i][0];
5203		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5204			rxbd->rx_bd_len = buf_size;
5205			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5206		}
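		/* The last BD of each page chains to the next page,
		 * wrapping back to page 0 on the final page.
		 */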
5207		if (i == (num_rings - 1))
5208			j = 0;
5209		else
5210			j = i + 1;
5211		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5212		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5213	}
5214}
5215
5216static void
5217bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5218{
5219	int i;
5220	u16 prod, ring_prod;
5221	u32 cid, rx_cid_addr, val;
5222	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5223	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5224
5225	if (ring_num == 0)
5226		cid = RX_CID;
5227	else
5228		cid = RX_RSS_CID + ring_num - 1;
5229
5230	rx_cid_addr = GET_CID_ADDR(cid);
5231
5232	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5233			     bp->rx_buf_use_size, bp->rx_max_ring);
5234
5235	bnx2_init_rx_context(bp, cid);
5236
5237	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5238		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5239		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5240	}
5241
5242	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5243	if (bp->rx_pg_ring_size) {
5244		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5245				     rxr->rx_pg_desc_mapping,
5246				     PAGE_SIZE, bp->rx_max_pg_ring);
5247		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5248		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5249		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5250		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5251
5252		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5253		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5254
5255		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5256		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5257
5258		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5259			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5260	}
5261
5262	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5263	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5264
5265	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5266	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5267
5268	ring_prod = prod = rxr->rx_pg_prod;
5269	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5270		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5271			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5272				    ring_num, i, bp->rx_pg_ring_size);
5273			break;
5274		}
5275		prod = BNX2_NEXT_RX_BD(prod);
5276		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5277	}
5278	rxr->rx_pg_prod = prod;
5279
5280	ring_prod = prod = rxr->rx_prod;
5281	for (i = 0; i < bp->rx_ring_size; i++) {
5282		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5283			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5284				    ring_num, i, bp->rx_ring_size);
5285			break;
5286		}
5287		prod = BNX2_NEXT_RX_BD(prod);
5288		ring_prod = BNX2_RX_RING_IDX(prod);
5289	}
5290	rxr->rx_prod = prod;
5291
5292	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5293	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5294	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5295
5296	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5297	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5298
5299	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5300}
5301
5302static void
5303bnx2_init_all_rings(struct bnx2 *bp)
5304{
5305	int i;
5306	u32 val;
5307
5308	bnx2_clear_ring_states(bp);
5309
5310	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5311	for (i = 0; i < bp->num_tx_rings; i++)
5312		bnx2_init_tx_ring(bp, i);
5313
5314	if (bp->num_tx_rings > 1)
5315		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5316			(TX_TSS_CID << 7));
5317
5318	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5319	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5320
5321	for (i = 0; i < bp->num_rx_rings; i++)
5322		bnx2_init_rx_ring(bp, i);
5323
5324	if (bp->num_rx_rings > 1) {
5325		u32 tbl_32 = 0;
5326
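		/* Fill the RSS indirection table: 4 bits per entry, written
		 * to the chip one 32-bit word (8 entries) at a time.
		 */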
5327		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5328			int shift = (i % 8) << 2;
5329
5330			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5331			if ((i % 8) == 7) {
5332				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5333				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5334					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5335					BNX2_RLUP_RSS_COMMAND_WRITE |
5336					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5337				tbl_32 = 0;
5338			}
5339		}
5340
5341		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5342		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5343
5344		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5345
5346	}
5347}
5348
5349static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5350{
5351	u32 max, num_rings = 1;
5352
5353	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5354		ring_size -= BNX2_MAX_RX_DESC_CNT;
5355		num_rings++;
5356	}
5357	/* Round num_rings up to the next power of 2, bounded by max_size. */
5358	max = max_size;
5359	while ((max & num_rings) == 0)
5360		max >>= 1;
5361
5362	if (num_rings != max)
5363		max <<= 1;
5364
5365	return max;
5366}
5367
5368static void
5369bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5370{
5371	u32 rx_size, rx_space, jumbo_size;
5372
5373	/* 8 for CRC and VLAN */
5374	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5375
5376	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5377		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5378
5379	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5380	bp->rx_pg_ring_size = 0;
5381	bp->rx_max_pg_ring = 0;
5382	bp->rx_max_pg_ring_idx = 0;
5383	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5384		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5385
5386		jumbo_size = size * pages;
5387		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5388			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5389
5390		bp->rx_pg_ring_size = jumbo_size;
5391		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5392							BNX2_MAX_RX_PG_RINGS);
5393		bp->rx_max_pg_ring_idx =
5394			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5395		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5396		bp->rx_copy_thresh = 0;
5397	}
5398
5399	bp->rx_buf_use_size = rx_size;
5400	/* hw alignment + build_skb() overhead */
5401	bp->rx_buf_size = kmalloc_size_roundup(
5402		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5403		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
5404	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5405	bp->rx_ring_size = size;
5406	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5407	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5408}
5409
5410static void
5411bnx2_free_tx_skbs(struct bnx2 *bp)
5412{
5413	int i;
5414
5415	for (i = 0; i < bp->num_tx_rings; i++) {
5416		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5417		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5418		int j;
5419
5420		if (!txr->tx_buf_ring)
5421			continue;
5422
5423		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5424			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5425			struct sk_buff *skb = tx_buf->skb;
5426			int k, last;
5427
5428			if (!skb) {
5429				j = BNX2_NEXT_TX_BD(j);
5430				continue;
5431			}
5432
5433			dma_unmap_single(&bp->pdev->dev,
5434					 dma_unmap_addr(tx_buf, mapping),
5435					 skb_headlen(skb),
5436					 DMA_TO_DEVICE);
5437
5438			tx_buf->skb = NULL;
5439
5440			last = tx_buf->nr_frags;
5441			j = BNX2_NEXT_TX_BD(j);
5442			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5443				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5444				dma_unmap_page(&bp->pdev->dev,
5445					dma_unmap_addr(tx_buf, mapping),
5446					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5447					DMA_TO_DEVICE);
5448			}
5449			dev_kfree_skb(skb);
5450		}
5451		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5452	}
5453}
5454
5455static void
5456bnx2_free_rx_skbs(struct bnx2 *bp)
5457{
5458	int i;
5459
5460	for (i = 0; i < bp->num_rx_rings; i++) {
5461		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5462		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5463		int j;
5464
5465		if (!rxr->rx_buf_ring)
5466			return;
5467
5468		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5469			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5470			u8 *data = rx_buf->data;
5471
5472			if (!data)
5473				continue;
5474
5475			dma_unmap_single(&bp->pdev->dev,
5476					 dma_unmap_addr(rx_buf, mapping),
5477					 bp->rx_buf_use_size,
5478					 DMA_FROM_DEVICE);
5479
5480			rx_buf->data = NULL;
5481
5482			kfree(data);
5483		}
5484		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5485			bnx2_free_rx_page(bp, rxr, j);
5486	}
5487}
5488
5489static void
5490bnx2_free_skbs(struct bnx2 *bp)
5491{
5492	bnx2_free_tx_skbs(bp);
5493	bnx2_free_rx_skbs(bp);
5494}
5495
5496static int
5497bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5498{
5499	int rc;
5500
5501	rc = bnx2_reset_chip(bp, reset_code);
5502	bnx2_free_skbs(bp);
5503	if (rc)
5504		return rc;
5505
5506	if ((rc = bnx2_init_chip(bp)) != 0)
5507		return rc;
5508
5509	bnx2_init_all_rings(bp);
5510	return 0;
5511}
5512
5513static int
5514bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5515{
5516	int rc;
5517
5518	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5519		return rc;
5520
5521	spin_lock_bh(&bp->phy_lock);
5522	bnx2_init_phy(bp, reset_phy);
5523	bnx2_set_link(bp);
5524	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5525		bnx2_remote_phy_event(bp);
5526	spin_unlock_bh(&bp->phy_lock);
5527	return 0;
5528}
5529
5530static int
5531bnx2_shutdown_chip(struct bnx2 *bp)
5532{
5533	u32 reset_code;
5534
5535	if (bp->flags & BNX2_FLAG_NO_WOL)
5536		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5537	else if (bp->wol)
5538		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5539	else
5540		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5541
5542	return bnx2_reset_chip(bp, reset_code);
5543}
5544
5545static int
5546bnx2_test_registers(struct bnx2 *bp)
5547{
5548	int ret;
5549	int i, is_5709;
5550	static const struct {
5551		u16   offset;
5552		u16   flags;
5553#define BNX2_FL_NOT_5709	1
5554		u32   rw_mask;
5555		u32   ro_mask;
5556	} reg_tbl[] = {
5557		{ 0x006c, 0, 0x00000000, 0x0000003f },
5558		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5559		{ 0x0094, 0, 0x00000000, 0x00000000 },
5560
5561		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5562		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5563		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5564		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5565		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5566		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5567		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5568		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5569		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5570
5571		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5572		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5573		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5574		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5575		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5576		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5577
5578		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5579		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5580		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5581
5582		{ 0x1000, 0, 0x00000000, 0x00000001 },
5583		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5584
5585		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5586		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5587		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5588		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5589		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5590		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5591		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5592		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5593		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5594		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5595
5596		{ 0x1800, 0, 0x00000000, 0x00000001 },
5597		{ 0x1804, 0, 0x00000000, 0x00000003 },
5598
5599		{ 0x2800, 0, 0x00000000, 0x00000001 },
5600		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5601		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5602		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5603		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5604		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5605		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5606		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5607		{ 0x2840, 0, 0x00000000, 0xffffffff },
5608		{ 0x2844, 0, 0x00000000, 0xffffffff },
5609		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5610		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5611
5612		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5613		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5614
5615		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5616		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5617		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5618		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5619		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5620		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5621		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5622		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5623		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5624
5625		{ 0x5004, 0, 0x00000000, 0x0000007f },
5626		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5627
5628		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5629		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5630		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5631		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5632		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5633		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5634		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5635		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5636		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5637
5638		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5639		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5640		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5641		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5642		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5643		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5644		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5645		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5646		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5647		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5648		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5649		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5650		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5651		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5652		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5653		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5654		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5655		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5656		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5657		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5658		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5659		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5660		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5661
5662		{ 0xffff, 0, 0x00000000, 0x00000000 },
5663	};
5664
5665	ret = 0;
5666	is_5709 = 0;
5667	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5668		is_5709 = 1;
5669
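	/* For each register, write 0 then all ones: read/write bits
	 * (rw_mask) must follow the write, read-only bits (ro_mask)
	 * must keep their saved value; restore the register afterwards.
	 */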
5670	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5671		u32 offset, rw_mask, ro_mask, save_val, val;
5672		u16 flags = reg_tbl[i].flags;
5673
5674		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5675			continue;
5676
5677		offset = (u32) reg_tbl[i].offset;
5678		rw_mask = reg_tbl[i].rw_mask;
5679		ro_mask = reg_tbl[i].ro_mask;
5680
5681		save_val = readl(bp->regview + offset);
5682
5683		writel(0, bp->regview + offset);
5684
5685		val = readl(bp->regview + offset);
5686		if ((val & rw_mask) != 0) {
5687			goto reg_test_err;
5688		}
5689
5690		if ((val & ro_mask) != (save_val & ro_mask)) {
5691			goto reg_test_err;
5692		}
5693
5694		writel(0xffffffff, bp->regview + offset);
5695
5696		val = readl(bp->regview + offset);
5697		if ((val & rw_mask) != rw_mask) {
5698			goto reg_test_err;
5699		}
5700
5701		if ((val & ro_mask) != (save_val & ro_mask)) {
5702			goto reg_test_err;
5703		}
5704
5705		writel(save_val, bp->regview + offset);
5706		continue;
5707
5708reg_test_err:
5709		writel(save_val, bp->regview + offset);
5710		ret = -ENODEV;
5711		break;
5712	}
5713	return ret;
5714}
5715
5716static int
5717bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5718{
5719	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5720		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
5721	int i;
5722
5723	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
5724		u32 offset;
5725
5726		for (offset = 0; offset < size; offset += 4) {
5727
5728			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5729
5730			if (bnx2_reg_rd_ind(bp, start + offset) !=
5731				test_pattern[i]) {
5732				return -ENODEV;
5733			}
5734		}
5735	}
5736	return 0;
5737}
5738
5739static int
5740bnx2_test_memory(struct bnx2 *bp)
5741{
5742	int ret = 0;
5743	int i;
5744	static struct mem_entry {
5745		u32   offset;
5746		u32   len;
5747	} mem_tbl_5706[] = {
5748		{ 0x60000,  0x4000 },
5749		{ 0xa0000,  0x3000 },
5750		{ 0xe0000,  0x4000 },
5751		{ 0x120000, 0x4000 },
5752		{ 0x1a0000, 0x4000 },
5753		{ 0x160000, 0x4000 },
5754		{ 0xffffffff, 0    },
5755	},
5756	mem_tbl_5709[] = {
5757		{ 0x60000,  0x4000 },
5758		{ 0xa0000,  0x3000 },
5759		{ 0xe0000,  0x4000 },
5760		{ 0x120000, 0x4000 },
5761		{ 0x1a0000, 0x4000 },
5762		{ 0xffffffff, 0    },
5763	};
5764	struct mem_entry *mem_tbl;
5765
5766	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5767		mem_tbl = mem_tbl_5709;
5768	else
5769		mem_tbl = mem_tbl_5706;
5770
5771	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5772		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5773			mem_tbl[i].len)) != 0) {
5774			return ret;
5775		}
5776	}
5777
5778	return ret;
5779}
5780
5781#define BNX2_MAC_LOOPBACK	0
5782#define BNX2_PHY_LOOPBACK	1
5783
5784static int
5785bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5786{
5787	unsigned int pkt_size, num_pkts, i;
5788	struct sk_buff *skb;
5789	u8 *data;
5790	unsigned char *packet;
5791	u16 rx_start_idx, rx_idx;
5792	dma_addr_t map;
5793	struct bnx2_tx_bd *txbd;
5794	struct bnx2_sw_bd *rx_buf;
5795	struct l2_fhdr *rx_hdr;
5796	int ret = -ENODEV;
5797	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5798	struct bnx2_tx_ring_info *txr;
5799	struct bnx2_rx_ring_info *rxr;
5800
5801	tx_napi = bnapi;
5802
5803	txr = &tx_napi->tx_ring;
5804	rxr = &bnapi->rx_ring;
5805	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5806		bp->loopback = MAC_LOOPBACK;
5807		bnx2_set_mac_loopback(bp);
5808	} else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5810		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5811			return 0;
5812
5813		bp->loopback = PHY_LOOPBACK;
5814		bnx2_set_phy_loopback(bp);
5815	} else {
5816		return -EINVAL;
5817	}
5818
5819	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5820	skb = netdev_alloc_skb(bp->dev, pkt_size);
5821	if (!skb)
5822		return -ENOMEM;
5823	packet = skb_put(skb, pkt_size);
5824	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5825	memset(packet + ETH_ALEN, 0x0, 8);
5826	for (i = 14; i < pkt_size; i++)
5827		packet[i] = (unsigned char) (i & 0xff);
5828
5829	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5830			     DMA_TO_DEVICE);
5831	if (dma_mapping_error(&bp->pdev->dev, map)) {
5832		dev_kfree_skb(skb);
5833		return -EIO;
5834	}
5835
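	/* Kick host coalescing (without raising an interrupt) so the
	 * status block is current before sampling the rx consumer index.
	 */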
5836	BNX2_WR(bp, BNX2_HC_COMMAND,
5837		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5838
5839	BNX2_RD(bp, BNX2_HC_COMMAND);
5840
5841	udelay(5);
5842	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5843
5844	num_pkts = 0;
5845
5846	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5847
5848	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5849	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5850	txbd->tx_bd_mss_nbytes = pkt_size;
5851	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5852
5853	num_pkts++;
5854	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5855	txr->tx_prod_bseq += pkt_size;
5856
5857	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5858	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5859
5860	udelay(100);
5861
5862	BNX2_WR(bp, BNX2_HC_COMMAND,
5863		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5864
5865	BNX2_RD(bp, BNX2_HC_COMMAND);
5866
5867	udelay(5);
5868
5869	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5870	dev_kfree_skb(skb);
5871
5872	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5873		goto loopback_test_done;
5874
5875	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5876	if (rx_idx != rx_start_idx + num_pkts) {
5877		goto loopback_test_done;
5878	}
5879
5880	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5881	data = rx_buf->data;
5882
5883	rx_hdr = get_l2_fhdr(data);
5884	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5885
5886	dma_sync_single_for_cpu(&bp->pdev->dev,
5887		dma_unmap_addr(rx_buf, mapping),
5888		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5889
5890	if (rx_hdr->l2_fhdr_status &
5891		(L2_FHDR_ERRORS_BAD_CRC |
5892		L2_FHDR_ERRORS_PHY_DECODE |
5893		L2_FHDR_ERRORS_ALIGNMENT |
5894		L2_FHDR_ERRORS_TOO_SHORT |
5895		L2_FHDR_ERRORS_GIANT_FRAME)) {
5896
5897		goto loopback_test_done;
5898	}
5899
5900	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5901		goto loopback_test_done;
5902	}
5903
5904	for (i = 14; i < pkt_size; i++) {
5905		if (*(data + i) != (unsigned char) (i & 0xff)) {
5906			goto loopback_test_done;
5907		}
5908	}
5909
5910	ret = 0;
5911
5912loopback_test_done:
5913	bp->loopback = 0;
5914	return ret;
5915}
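
/* The loopback test above is self-contained: it builds a frame
 * addressed to the device's own MAC, posts one TX descriptor, updates
 * the producer index and byte-count mailboxes, forces a coalesce so
 * the status block updates, and then checks that exactly one frame
 * arrived with no l2_fhdr error bits and an intact payload.  The 4
 * bytes subtracted from l2_fhdr_pkt_len before the size comparison
 * account for the hardware-appended Ethernet CRC.
 */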
5916
5917#define BNX2_MAC_LOOPBACK_FAILED	1
5918#define BNX2_PHY_LOOPBACK_FAILED	2
5919#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5920					 BNX2_PHY_LOOPBACK_FAILED)
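
/* bnx2_test_loopback() below ORs these bits into its return value, so
 * callers can tell which path failed:
 *
 *	rc == 1: MAC loopback failed
 *	rc == 2: PHY loopback failed
 *	rc == 3: both failed (BNX2_LOOPBACK_FAILED)
 */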
5921
5922static int
5923bnx2_test_loopback(struct bnx2 *bp)
5924{
5925	int rc = 0;
5926
5927	if (!netif_running(bp->dev))
5928		return BNX2_LOOPBACK_FAILED;
5929
5930	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5931	spin_lock_bh(&bp->phy_lock);
5932	bnx2_init_phy(bp, 1);
5933	spin_unlock_bh(&bp->phy_lock);
5934	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5935		rc |= BNX2_MAC_LOOPBACK_FAILED;
5936	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5937		rc |= BNX2_PHY_LOOPBACK_FAILED;
5938	return rc;
5939}
5940
5941#define NVRAM_SIZE 0x200
5942#define CRC32_RESIDUAL 0xdebb20e3
5943
5944static int
5945bnx2_test_nvram(struct bnx2 *bp)
5946{
5947	__be32 buf[NVRAM_SIZE / 4];
5948	u8 *data = (u8 *) buf;
5949	int rc = 0;
5950	u32 magic, csum;
5951
5952	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5953		goto test_nvram_done;
5954
5955	magic = be32_to_cpu(buf[0]);
5956	if (magic != 0x669955aa) {
5957		rc = -ENODEV;
5958		goto test_nvram_done;
5959	}
5960
5961	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5962		goto test_nvram_done;
5963
5964	csum = ether_crc_le(0x100, data);
5965	if (csum != CRC32_RESIDUAL) {
5966		rc = -ENODEV;
5967		goto test_nvram_done;
5968	}
5969
5970	csum = ether_crc_le(0x100, data + 0x100);
5971	if (csum != CRC32_RESIDUAL) {
5972		rc = -ENODEV;
5973	}
5974
5975test_nvram_done:
5976	return rc;
5977}
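
/* The two ether_crc_le() checks above rely on a standard CRC-32
 * property: computing the CRC over a block that already contains its
 * own little-endian CRC at the end yields the fixed residual
 * 0xdebb20e3 when the block is intact.  Each 0x100-byte half of the
 * NVRAM data can therefore be validated without knowing where inside
 * it the checksum field sits.
 */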
5978
5979static int
5980bnx2_test_link(struct bnx2 *bp)
5981{
5982	u32 bmsr;
5983
5984	if (!netif_running(bp->dev))
5985		return -ENODEV;
5986
5987	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5988		if (bp->link_up)
5989			return 0;
5990		return -ENODEV;
5991	}
5992	spin_lock_bh(&bp->phy_lock);
5993	bnx2_enable_bmsr1(bp);
5994	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5995	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5996	bnx2_disable_bmsr1(bp);
5997	spin_unlock_bh(&bp->phy_lock);
5998
5999	if (bmsr & BMSR_LSTATUS) {
6000		return 0;
6001	}
6002	return -ENODEV;
6003}
6004
6005static int
6006bnx2_test_intr(struct bnx2 *bp)
6007{
6008	int i;
6009	u16 status_idx;
6010
6011	if (!netif_running(bp->dev))
6012		return -ENODEV;
6013
6014	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6015
6016	/* This register is not touched during run-time. */
6017	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6018	BNX2_RD(bp, BNX2_HC_COMMAND);
6019
6020	for (i = 0; i < 10; i++) {
6021		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6022			status_idx) {
6023
6024			break;
6025		}
6026
6027		msleep_interruptible(10);
6028	}
6029	if (i < 10)
6030		return 0;
6031
6032	return -ENODEV;
6033}
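
/* The interrupt test above works by forcing the host coalescing block
 * to fire (BNX2_HC_COMMAND_COAL_NOW) and then polling the low 16 bits
 * of BNX2_PCICFG_INT_ACK_CMD for up to ~100 ms (10 x 10 ms); any
 * change in the status index proves that an interrupt and status-block
 * update were actually delivered.
 */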
6034
6035/* Determine whether the link is up, for parallel detection. */
6036static int
6037bnx2_5706_serdes_has_link(struct bnx2 *bp)
6038{
6039	u32 mode_ctl, an_dbg, exp;
6040
6041	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6042		return 0;
6043
6044	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6045	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6046
6047	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6048		return 0;
6049
6050	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6051	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6052	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6053
6054	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6055		return 0;
6056
6057	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6058	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6059	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6060
6061	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6062		return 0;
6063
6064	return 1;
6065}
6066
6067static void
6068bnx2_5706_serdes_timer(struct bnx2 *bp)
6069{
6070	int check_link = 1;
6071
6072	spin_lock(&bp->phy_lock);
6073	if (bp->serdes_an_pending) {
6074		bp->serdes_an_pending--;
6075		check_link = 0;
6076	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6077		u32 bmcr;
6078
6079		bp->current_interval = BNX2_TIMER_INTERVAL;
6080
6081		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6082
6083		if (bmcr & BMCR_ANENABLE) {
6084			if (bnx2_5706_serdes_has_link(bp)) {
6085				bmcr &= ~BMCR_ANENABLE;
6086				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6087				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6088				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6089			}
6090		}
6091	}
6092	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6093		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6094		u32 phy2;
6095
6096		bnx2_write_phy(bp, 0x17, 0x0f01);
6097		bnx2_read_phy(bp, 0x15, &phy2);
6098		if (phy2 & 0x20) {
6099			u32 bmcr;
6100
6101			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6102			bmcr |= BMCR_ANENABLE;
6103			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6104
6105			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6106		}
6107	} else
6108		bp->current_interval = BNX2_TIMER_INTERVAL;
6109
6110	if (check_link) {
6111		u32 val;
6112
6113		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6114		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6115		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6116
6117		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6118			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6119				bnx2_5706s_force_link_dn(bp, 1);
6120				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6121			} else
6122				bnx2_set_link(bp);
6123		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6124			bnx2_set_link(bp);
6125	}
6126	spin_unlock(&bp->phy_lock);
6127}
6128
6129static void
6130bnx2_5708_serdes_timer(struct bnx2 *bp)
6131{
6132	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6133		return;
6134
6135	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6136		bp->serdes_an_pending = 0;
6137		return;
6138	}
6139
6140	spin_lock(&bp->phy_lock);
6141	if (bp->serdes_an_pending)
6142		bp->serdes_an_pending--;
6143	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6144		u32 bmcr;
6145
6146		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6147		if (bmcr & BMCR_ANENABLE) {
6148			bnx2_enable_forced_2g5(bp);
6149			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6150		} else {
6151			bnx2_disable_forced_2g5(bp);
6152			bp->serdes_an_pending = 2;
6153			bp->current_interval = BNX2_TIMER_INTERVAL;
6154		}
6155
6156	} else
6157		bp->current_interval = BNX2_TIMER_INTERVAL;
6158
6159	spin_unlock(&bp->phy_lock);
6160}
6161
6162static void
6163bnx2_timer(struct timer_list *t)
6164{
6165	struct bnx2 *bp = from_timer(bp, t, timer);
6166
6167	if (!netif_running(bp->dev))
6168		return;
6169
6170	if (atomic_read(&bp->intr_sem) != 0)
6171		goto bnx2_restart_timer;
6172
6173	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6174	     BNX2_FLAG_USING_MSI)
6175		bnx2_chk_missed_msi(bp);
6176
6177	bnx2_send_heart_beat(bp);
6178
6179	bp->stats_blk->stat_FwRxDrop =
6180		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6181
6182	/* work around occasionally corrupted counters */
6183	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6184		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6185			BNX2_HC_COMMAND_STATS_NOW);
6186
6187	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6188		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6189			bnx2_5706_serdes_timer(bp);
6190		else
6191			bnx2_5708_serdes_timer(bp);
6192	}
6193
6194bnx2_restart_timer:
6195	mod_timer(&bp->timer, jiffies + bp->current_interval);
6196}
6197
6198static int
6199bnx2_request_irq(struct bnx2 *bp)
6200{
6201	unsigned long flags;
6202	struct bnx2_irq *irq;
6203	int rc = 0, i;
6204
6205	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6206		flags = 0;
6207	else
6208		flags = IRQF_SHARED;
6209
6210	for (i = 0; i < bp->irq_nvecs; i++) {
6211		irq = &bp->irq_tbl[i];
6212		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6213				 &bp->bnx2_napi[i]);
6214		if (rc)
6215			break;
6216		irq->requested = 1;
6217	}
6218	return rc;
6219}
6220
6221static void
6222__bnx2_free_irq(struct bnx2 *bp)
6223{
6224	struct bnx2_irq *irq;
6225	int i;
6226
6227	for (i = 0; i < bp->irq_nvecs; i++) {
6228		irq = &bp->irq_tbl[i];
6229		if (irq->requested)
6230			free_irq(irq->vector, &bp->bnx2_napi[i]);
6231		irq->requested = 0;
6232	}
6233}
6234
6235static void
6236bnx2_free_irq(struct bnx2 *bp)
6237{
6238
6239	__bnx2_free_irq(bp);
6240	if (bp->flags & BNX2_FLAG_USING_MSI)
6241		pci_disable_msi(bp->pdev);
6242	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6243		pci_disable_msix(bp->pdev);
6244
6245	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6246}
6247
6248static void
6249bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6250{
6251	int i, total_vecs;
6252	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6253	struct net_device *dev = bp->dev;
6254	const int len = sizeof(bp->irq_tbl[0].name);
6255
6256	bnx2_setup_msix_tbl(bp);
6257	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6258	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6259	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6260
6261	/*  Need to flush the previous three writes to ensure MSI-X
6262	 *  is set up properly */
6263	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6264
6265	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6266		msix_ent[i].entry = i;
6267		msix_ent[i].vector = 0;
6268	}
6269
6270	total_vecs = msix_vecs;
6271#ifdef BCM_CNIC
6272	total_vecs++;
6273#endif
6274	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6275					   BNX2_MIN_MSIX_VEC, total_vecs);
6276	if (total_vecs < 0)
6277		return;
6278
6279	msix_vecs = total_vecs;
6280#ifdef BCM_CNIC
6281	msix_vecs--;
6282#endif
6283	bp->irq_nvecs = msix_vecs;
6284	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6285	for (i = 0; i < total_vecs; i++) {
6286		bp->irq_tbl[i].vector = msix_ent[i].vector;
6287		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6288		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6289	}
6290}
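
/* pci_enable_msix_range() returns the number of vectors actually
 * allocated when it can grant at least the requested minimum, or a
 * negative errno.  A minimal usage sketch:
 *
 *	n = pci_enable_msix_range(pdev, ent, min_vecs, want_vecs);
 *	if (n < 0)
 *		fall back to MSI or INTx;
 *	else
 *		use ent[0..n-1].vector;
 *
 * which is why the code above re-derives msix_vecs from total_vecs
 * rather than assuming the full request was honored.
 */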
6291
6292static int
6293bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6294{
6295	int cpus = netif_get_num_default_rss_queues();
6296	int msix_vecs;
6297
6298	if (!bp->num_req_rx_rings)
6299		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6300	else if (!bp->num_req_tx_rings)
6301		msix_vecs = max(cpus, bp->num_req_rx_rings);
6302	else
6303		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6304
6305	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6306
6307	bp->irq_tbl[0].handler = bnx2_interrupt;
6308	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6309	bp->irq_nvecs = 1;
6310	bp->irq_tbl[0].vector = bp->pdev->irq;
6311
6312	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6313		bnx2_enable_msix(bp, msix_vecs);
6314
6315	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6316	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6317		if (pci_enable_msi(bp->pdev) == 0) {
6318			bp->flags |= BNX2_FLAG_USING_MSI;
6319			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6320				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6321				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6322			} else
6323				bp->irq_tbl[0].handler = bnx2_msi;
6324
6325			bp->irq_tbl[0].vector = bp->pdev->irq;
6326		}
6327	}
6328
6329	if (!bp->num_req_tx_rings)
6330		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6331	else
6332		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6333
6334	if (!bp->num_req_rx_rings)
6335		bp->num_rx_rings = bp->irq_nvecs;
6336	else
6337		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6338
6339	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6340
6341	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6342}
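
/* When no explicit TX ring count was requested, the TX ring count is
 * rounded down to a power of two, e.g. irq_nvecs == 6 yields
 * rounddown_pow_of_two(6) == 4 TX rings while the RX side uses all 6
 * vectors; a power-of-two count lets the TX queue be selected with a
 * cheap mask rather than a modulo (an inference, not stated in the
 * source).
 */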
6343
6344/* Called with rtnl_lock */
6345static int
6346bnx2_open(struct net_device *dev)
6347{
6348	struct bnx2 *bp = netdev_priv(dev);
6349	int rc;
6350
6351	rc = bnx2_request_firmware(bp);
6352	if (rc < 0)
6353		goto out;
6354
6355	netif_carrier_off(dev);
6356
6357	bnx2_disable_int(bp);
6358
6359	rc = bnx2_setup_int_mode(bp, disable_msi);
6360	if (rc)
6361		goto open_err;
6362	bnx2_init_napi(bp);
6363	bnx2_napi_enable(bp);
6364	rc = bnx2_alloc_mem(bp);
6365	if (rc)
6366		goto open_err;
6367
6368	rc = bnx2_request_irq(bp);
6369	if (rc)
6370		goto open_err;
6371
6372	rc = bnx2_init_nic(bp, 1);
6373	if (rc)
6374		goto open_err;
6375
6376	mod_timer(&bp->timer, jiffies + bp->current_interval);
6377
6378	atomic_set(&bp->intr_sem, 0);
6379
6380	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6381
6382	bnx2_enable_int(bp);
6383
6384	if (bp->flags & BNX2_FLAG_USING_MSI) {
6385		/* Test MSI to make sure it is working
6386		 * If MSI test fails, go back to INTx mode
6387		 */
6388		if (bnx2_test_intr(bp) != 0) {
6389			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6390
6391			bnx2_disable_int(bp);
6392			bnx2_free_irq(bp);
6393
6394			bnx2_setup_int_mode(bp, 1);
6395
6396			rc = bnx2_init_nic(bp, 0);
6397
6398			if (!rc)
6399				rc = bnx2_request_irq(bp);
6400
6401			if (rc) {
6402				del_timer_sync(&bp->timer);
6403				goto open_err;
6404			}
6405			bnx2_enable_int(bp);
6406		}
6407	}
6408	if (bp->flags & BNX2_FLAG_USING_MSI)
6409		netdev_info(dev, "using MSI\n");
6410	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6411		netdev_info(dev, "using MSIX\n");
6412
6413	netif_tx_start_all_queues(dev);
6414out:
6415	return rc;
6416
6417open_err:
6418	bnx2_napi_disable(bp);
6419	bnx2_free_skbs(bp);
6420	bnx2_free_irq(bp);
6421	bnx2_free_mem(bp);
6422	bnx2_del_napi(bp);
6423	bnx2_release_firmware(bp);
6424	goto out;
6425}
6426
6427static void
6428bnx2_reset_task(struct work_struct *work)
6429{
6430	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6431	int rc;
6432	u16 pcicmd;
6433
6434	rtnl_lock();
6435	if (!netif_running(bp->dev)) {
6436		rtnl_unlock();
6437		return;
6438	}
6439
6440	bnx2_netif_stop(bp, true);
6441
6442	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6443	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6444		/* in case the PCI block has been reset */
6445		pci_restore_state(bp->pdev);
6446		pci_save_state(bp->pdev);
6447	}
6448	rc = bnx2_init_nic(bp, 1);
6449	if (rc) {
6450		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6451		bnx2_napi_enable(bp);
6452		dev_close(bp->dev);
6453		rtnl_unlock();
6454		return;
6455	}
6456
6457	atomic_set(&bp->intr_sem, 1);
6458	bnx2_netif_start(bp, true);
6459	rtnl_unlock();
6460}
6461
6462#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6463
6464static void
6465bnx2_dump_ftq(struct bnx2 *bp)
6466{
6467	int i;
6468	u32 reg, bdidx, cid, valid;
6469	struct net_device *dev = bp->dev;
6470	static const struct ftq_reg {
6471		char *name;
6472		u32 off;
6473	} ftq_arr[] = {
6474		BNX2_FTQ_ENTRY(RV2P_P),
6475		BNX2_FTQ_ENTRY(RV2P_T),
6476		BNX2_FTQ_ENTRY(RV2P_M),
6477		BNX2_FTQ_ENTRY(TBDR_),
6478		BNX2_FTQ_ENTRY(TDMA_),
6479		BNX2_FTQ_ENTRY(TXP_),
6480		BNX2_FTQ_ENTRY(TXP_),
6481		BNX2_FTQ_ENTRY(TPAT_),
6482		BNX2_FTQ_ENTRY(RXP_C),
6483		BNX2_FTQ_ENTRY(RXP_),
6484		BNX2_FTQ_ENTRY(COM_COMXQ_),
6485		BNX2_FTQ_ENTRY(COM_COMTQ_),
6486		BNX2_FTQ_ENTRY(COM_COMQ_),
6487		BNX2_FTQ_ENTRY(CP_CPQ_),
6488	};
6489
6490	netdev_err(dev, "<--- start FTQ dump --->\n");
6491	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6492		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6493			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6494
6495	netdev_err(dev, "CPU states:\n");
6496	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6497		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6498			   reg, bnx2_reg_rd_ind(bp, reg),
6499			   bnx2_reg_rd_ind(bp, reg + 4),
6500			   bnx2_reg_rd_ind(bp, reg + 8),
6501			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6502			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6503			   bnx2_reg_rd_ind(bp, reg + 0x20));
6504
6505	netdev_err(dev, "<--- end FTQ dump --->\n");
6506	netdev_err(dev, "<--- start TBDC dump --->\n");
6507	netdev_err(dev, "TBDC free cnt: %ld\n",
6508		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6509	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6510	for (i = 0; i < 0x20; i++) {
6511		int j = 0;
6512
6513		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6514		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6515			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6516		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6517		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6518			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6519			j++;
6520
6521		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6522		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6523		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6524		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6525			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6526			   bdidx >> 24, (valid >> 8) & 0x0ff);
6527	}
6528	netdev_err(dev, "<--- end TBDC dump --->\n");
6529}
6530
6531static void
6532bnx2_dump_state(struct bnx2 *bp)
6533{
6534	struct net_device *dev = bp->dev;
6535	u32 val1, val2;
6536
6537	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6538	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6539		   atomic_read(&bp->intr_sem), val1);
6540	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6541	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6542	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6543	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6544		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6545		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6546	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6547		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6548	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6549		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6550	if (bp->flags & BNX2_FLAG_USING_MSIX)
6551		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6552			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6553}
6554
6555static void
6556bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6557{
6558	struct bnx2 *bp = netdev_priv(dev);
6559
6560	bnx2_dump_ftq(bp);
6561	bnx2_dump_state(bp);
6562	bnx2_dump_mcp_state(bp);
6563
6564	/* This allows the netif to be shut down gracefully before resetting */
6565	schedule_work(&bp->reset_task);
6566}
6567
6568/* Called with netif_tx_lock.
6569 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6570 * netif_wake_queue().
6571 */
6572static netdev_tx_t
6573bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6574{
6575	struct bnx2 *bp = netdev_priv(dev);
6576	dma_addr_t mapping;
6577	struct bnx2_tx_bd *txbd;
6578	struct bnx2_sw_tx_bd *tx_buf;
6579	u32 len, vlan_tag_flags, last_frag, mss;
6580	u16 prod, ring_prod;
6581	int i;
6582	struct bnx2_napi *bnapi;
6583	struct bnx2_tx_ring_info *txr;
6584	struct netdev_queue *txq;
6585
6586	/* Determine which tx ring this skb will be placed on */
6587	i = skb_get_queue_mapping(skb);
6588	bnapi = &bp->bnx2_napi[i];
6589	txr = &bnapi->tx_ring;
6590	txq = netdev_get_tx_queue(dev, i);
6591
6592	if (unlikely(bnx2_tx_avail(bp, txr) <
6593	    (skb_shinfo(skb)->nr_frags + 1))) {
6594		netif_tx_stop_queue(txq);
6595		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6596
6597		return NETDEV_TX_BUSY;
6598	}
6599	len = skb_headlen(skb);
6600	prod = txr->tx_prod;
6601	ring_prod = BNX2_TX_RING_IDX(prod);
6602
6603	vlan_tag_flags = 0;
6604	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6605		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6606	}
6607
6608	if (skb_vlan_tag_present(skb)) {
6609		vlan_tag_flags |=
6610			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6611	}
6612
6613	if ((mss = skb_shinfo(skb)->gso_size)) {
6614		u32 tcp_opt_len;
6615		struct iphdr *iph;
6616
6617		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6618
6619		tcp_opt_len = tcp_optlen(skb);
6620
6621		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6622			u32 tcp_off = skb_transport_offset(skb) -
6623				      sizeof(struct ipv6hdr) - ETH_HLEN;
6624
6625			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6626					  TX_BD_FLAGS_SW_FLAGS;
6627			if (likely(tcp_off == 0))
6628				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6629			else {
6630				tcp_off >>= 3;
6631				vlan_tag_flags |= ((tcp_off & 0x3) <<
6632						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6633						  ((tcp_off & 0x10) <<
6634						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6635				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6636			}
6637		} else {
6638			iph = ip_hdr(skb);
6639			if (tcp_opt_len || (iph->ihl > 5)) {
6640				vlan_tag_flags |= ((iph->ihl - 5) +
6641						   (tcp_opt_len >> 2)) << 8;
6642			}
6643		}
6644	} else
6645		mss = 0;
6646
6647	mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6648				 DMA_TO_DEVICE);
6649	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6650		dev_kfree_skb_any(skb);
6651		return NETDEV_TX_OK;
6652	}
6653
6654	tx_buf = &txr->tx_buf_ring[ring_prod];
6655	tx_buf->skb = skb;
6656	dma_unmap_addr_set(tx_buf, mapping, mapping);
6657
6658	txbd = &txr->tx_desc_ring[ring_prod];
6659
6660	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6661	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6662	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6663	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6664
6665	last_frag = skb_shinfo(skb)->nr_frags;
6666	tx_buf->nr_frags = last_frag;
6667	tx_buf->is_gso = skb_is_gso(skb);
6668
6669	for (i = 0; i < last_frag; i++) {
6670		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6671
6672		prod = BNX2_NEXT_TX_BD(prod);
6673		ring_prod = BNX2_TX_RING_IDX(prod);
6674		txbd = &txr->tx_desc_ring[ring_prod];
6675
6676		len = skb_frag_size(frag);
6677		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6678					   DMA_TO_DEVICE);
6679		if (dma_mapping_error(&bp->pdev->dev, mapping))
6680			goto dma_error;
6681		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6682				   mapping);
6683
6684		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6685		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6686		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6687		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6688
6689	}
6690	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6691
6692	/* Sync BD data before updating TX mailbox */
6693	wmb();
6694
6695	netdev_tx_sent_queue(txq, skb->len);
6696
6697	prod = BNX2_NEXT_TX_BD(prod);
6698	txr->tx_prod_bseq += skb->len;
6699
6700	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6701	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6702
6703	txr->tx_prod = prod;
6704
6705	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6706		netif_tx_stop_queue(txq);
6707
6708		/* netif_tx_stop_queue() must be done before checking
6709		 * tx index in bnx2_tx_avail() below, because in
6710		 * bnx2_tx_int(), we update tx index before checking for
6711		 * netif_tx_queue_stopped().
6712		 */
6713		smp_mb();
6714		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6715			netif_tx_wake_queue(txq);
6716	}
6717
6718	return NETDEV_TX_OK;
6719dma_error:
6720	/* save value of frag that failed */
6721	last_frag = i;
6722
6723	/* start back at beginning and unmap skb */
6724	prod = txr->tx_prod;
6725	ring_prod = BNX2_TX_RING_IDX(prod);
6726	tx_buf = &txr->tx_buf_ring[ring_prod];
6727	tx_buf->skb = NULL;
6728	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6729			 skb_headlen(skb), DMA_TO_DEVICE);
6730
6731	/* unmap remaining mapped pages */
6732	for (i = 0; i < last_frag; i++) {
6733		prod = BNX2_NEXT_TX_BD(prod);
6734		ring_prod = BNX2_TX_RING_IDX(prod);
6735		tx_buf = &txr->tx_buf_ring[ring_prod];
6736		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6737			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6738			       DMA_TO_DEVICE);
6739	}
6740
6741	dev_kfree_skb_any(skb);
6742	return NETDEV_TX_OK;
6743}
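
/* The smp_mb() in bnx2_start_xmit() pairs with a matching barrier on
 * the completion side (see the comment above it).  The race it closes,
 * sketched:
 *
 *	producer (bnx2_start_xmit)	consumer (bnx2_tx_int)
 *	--------------------------	----------------------
 *	netif_tx_stop_queue()		advance tx consumer index
 *	smp_mb()			check netif_tx_queue_stopped()
 *	re-check bnx2_tx_avail()	wake queue if stopped
 *
 * Without the barrier the producer could miss the consumer's index
 * update and leave the queue stopped indefinitely.
 */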
6744
6745/* Called with rtnl_lock */
6746static int
6747bnx2_close(struct net_device *dev)
6748{
6749	struct bnx2 *bp = netdev_priv(dev);
6750
6751	bnx2_disable_int_sync(bp);
6752	bnx2_napi_disable(bp);
6753	netif_tx_disable(dev);
6754	del_timer_sync(&bp->timer);
6755	bnx2_shutdown_chip(bp);
6756	bnx2_free_irq(bp);
6757	bnx2_free_skbs(bp);
6758	bnx2_free_mem(bp);
6759	bnx2_del_napi(bp);
6760	bp->link_up = 0;
6761	netif_carrier_off(bp->dev);
6762	return 0;
6763}
6764
6765static void
6766bnx2_save_stats(struct bnx2 *bp)
6767{
6768	u32 *hw_stats = (u32 *) bp->stats_blk;
6769	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6770	int i;
6771
6772	/* The first 10 counters are 64-bit counters, stored as hi/lo u32 pairs */
6773	for (i = 0; i < 20; i += 2) {
6774		u32 hi;
6775		u64 lo;
6776
6777		hi = temp_stats[i] + hw_stats[i];
6778		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6779		if (lo > 0xffffffff)
6780			hi++;
6781		temp_stats[i] = hi;
6782		temp_stats[i + 1] = lo & 0xffffffff;
6783	}
6784
6785	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6786		temp_stats[i] += hw_stats[i];
6787}
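
/* The loop above does manual carry propagation when folding the
 * hardware hi/lo counter pairs into the saved snapshot: the two low
 * halves are summed in a u64 so 32-bit overflow can be detected and
 * carried into the high word, e.g.
 *
 *	temp lo 0xffffff00 + hw lo 0x00000200 = 0x100000100
 *	  -> hi incremented by 1, lo stored as 0x00000100
 */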
6788
6789#define GET_64BIT_NET_STATS64(ctr)		\
6790	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6791
6792#define GET_64BIT_NET_STATS(ctr)				\
6793	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6794	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6795
6796#define GET_32BIT_NET_STATS(ctr)				\
6797	(unsigned long) (bp->stats_blk->ctr +			\
6798			 bp->temp_stats_blk->ctr)
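
/* GET_64BIT_NET_STATS(stat_IfHCInOctets), for example, expands to the
 * live hardware counter plus the snapshot saved across chip resets:
 *
 *	((u64) bp->stats_blk->stat_IfHCInOctets_hi << 32) +
 *	 (u64) bp->stats_blk->stat_IfHCInOctets_lo +
 *	((u64) bp->temp_stats_blk->stat_IfHCInOctets_hi << 32) +
 *	 (u64) bp->temp_stats_blk->stat_IfHCInOctets_lo
 */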
6799
6800static void
6801bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6802{
6803	struct bnx2 *bp = netdev_priv(dev);
6804
6805	if (!bp->stats_blk)
6806		return;
6807
6808	net_stats->rx_packets =
6809		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6810		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6811		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6812
6813	net_stats->tx_packets =
6814		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6815		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6816		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6817
6818	net_stats->rx_bytes =
6819		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6820
6821	net_stats->tx_bytes =
6822		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6823
6824	net_stats->multicast =
6825		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6826
6827	net_stats->collisions =
6828		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6829
6830	net_stats->rx_length_errors =
6831		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6832		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6833
6834	net_stats->rx_over_errors =
6835		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6836		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6837
6838	net_stats->rx_frame_errors =
6839		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6840
6841	net_stats->rx_crc_errors =
6842		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6843
6844	net_stats->rx_errors = net_stats->rx_length_errors +
6845		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6846		net_stats->rx_crc_errors;
6847
6848	net_stats->tx_aborted_errors =
6849		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6850		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6851
6852	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6853	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6854		net_stats->tx_carrier_errors = 0;
6855	else {
6856		net_stats->tx_carrier_errors =
6857			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6858	}
6859
6860	net_stats->tx_errors =
6861		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6862		net_stats->tx_aborted_errors +
6863		net_stats->tx_carrier_errors;
6864
6865	net_stats->rx_missed_errors =
6866		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6867		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6868		GET_32BIT_NET_STATS(stat_FwRxDrop);
6869
6870}
6871
6872/* All ethtool functions called with rtnl_lock */
6873
6874static int
6875bnx2_get_link_ksettings(struct net_device *dev,
6876			struct ethtool_link_ksettings *cmd)
6877{
6878	struct bnx2 *bp = netdev_priv(dev);
6879	int support_serdes = 0, support_copper = 0;
6880	u32 supported, advertising;
6881
6882	supported = SUPPORTED_Autoneg;
6883	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6884		support_serdes = 1;
6885		support_copper = 1;
6886	} else if (bp->phy_port == PORT_FIBRE)
6887		support_serdes = 1;
6888	else
6889		support_copper = 1;
6890
6891	if (support_serdes) {
6892		supported |= SUPPORTED_1000baseT_Full |
6893			SUPPORTED_FIBRE;
6894		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6895			supported |= SUPPORTED_2500baseX_Full;
6896	}
6897	if (support_copper) {
6898		supported |= SUPPORTED_10baseT_Half |
6899			SUPPORTED_10baseT_Full |
6900			SUPPORTED_100baseT_Half |
6901			SUPPORTED_100baseT_Full |
6902			SUPPORTED_1000baseT_Full |
6903			SUPPORTED_TP;
6904	}
6905
6906	spin_lock_bh(&bp->phy_lock);
6907	cmd->base.port = bp->phy_port;
6908	advertising = bp->advertising;
6909
6910	if (bp->autoneg & AUTONEG_SPEED) {
6911		cmd->base.autoneg = AUTONEG_ENABLE;
6912	} else {
6913		cmd->base.autoneg = AUTONEG_DISABLE;
6914	}
6915
6916	if (netif_carrier_ok(dev)) {
6917		cmd->base.speed = bp->line_speed;
6918		cmd->base.duplex = bp->duplex;
6919		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6920			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6921				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6922			else
6923				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6924		}
6925	}
6926	else {
6927		cmd->base.speed = SPEED_UNKNOWN;
6928		cmd->base.duplex = DUPLEX_UNKNOWN;
6929	}
6930	spin_unlock_bh(&bp->phy_lock);
6931
6932	cmd->base.phy_address = bp->phy_addr;
6933
6934	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6935						supported);
6936	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6937						advertising);
6938
6939	return 0;
6940}
6941
6942static int
6943bnx2_set_link_ksettings(struct net_device *dev,
6944			const struct ethtool_link_ksettings *cmd)
6945{
6946	struct bnx2 *bp = netdev_priv(dev);
6947	u8 autoneg = bp->autoneg;
6948	u8 req_duplex = bp->req_duplex;
6949	u16 req_line_speed = bp->req_line_speed;
6950	u32 advertising = bp->advertising;
6951	int err = -EINVAL;
6952
6953	spin_lock_bh(&bp->phy_lock);
6954
6955	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6956		goto err_out_unlock;
6957
6958	if (cmd->base.port != bp->phy_port &&
6959	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6960		goto err_out_unlock;
6961
6962	/* If device is down, we can store the settings only if the user
6963	 * is setting the currently active port.
6964	 */
6965	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6966		goto err_out_unlock;
6967
6968	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6969		autoneg |= AUTONEG_SPEED;
6970
6971		ethtool_convert_link_mode_to_legacy_u32(
6972			&advertising, cmd->link_modes.advertising);
6973
6974		if (cmd->base.port == PORT_TP) {
6975			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6976			if (!advertising)
6977				advertising = ETHTOOL_ALL_COPPER_SPEED;
6978		} else {
6979			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6980			if (!advertising)
6981				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6982		}
6983		advertising |= ADVERTISED_Autoneg;
6984	}
6985	else {
6986		u32 speed = cmd->base.speed;
6987
6988		if (cmd->base.port == PORT_FIBRE) {
6989			if ((speed != SPEED_1000 &&
6990			     speed != SPEED_2500) ||
6991			    (cmd->base.duplex != DUPLEX_FULL))
6992				goto err_out_unlock;
6993
6994			if (speed == SPEED_2500 &&
6995			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6996				goto err_out_unlock;
6997		} else if (speed == SPEED_1000 || speed == SPEED_2500)
6998			goto err_out_unlock;
6999
7000		autoneg &= ~AUTONEG_SPEED;
7001		req_line_speed = speed;
7002		req_duplex = cmd->base.duplex;
7003		advertising = 0;
7004	}
7005
7006	bp->autoneg = autoneg;
7007	bp->advertising = advertising;
7008	bp->req_line_speed = req_line_speed;
7009	bp->req_duplex = req_duplex;
7010
7011	err = 0;
7012	/* If device is down, the new settings will be picked up when it is
7013	 * brought up.
7014	 */
7015	if (netif_running(dev))
7016		err = bnx2_setup_phy(bp, cmd->base.port);
7017
7018err_out_unlock:
7019	spin_unlock_bh(&bp->phy_lock);
7020
7021	return err;
7022}
7023
7024static void
7025bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7026{
7027	struct bnx2 *bp = netdev_priv(dev);
7028
7029	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7030	strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7031	strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7032}
7033
7034#define BNX2_REGDUMP_LEN		(32 * 1024)
7035
7036static int
7037bnx2_get_regs_len(struct net_device *dev)
7038{
7039	return BNX2_REGDUMP_LEN;
7040}
7041
7042static void
7043bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7044{
7045	u32 *p = _p, i, offset;
7046	u8 *orig_p = _p;
7047	struct bnx2 *bp = netdev_priv(dev);
7048	static const u32 reg_boundaries[] = {
7049		0x0000, 0x0098, 0x0400, 0x045c,
7050		0x0800, 0x0880, 0x0c00, 0x0c10,
7051		0x0c30, 0x0d08, 0x1000, 0x101c,
7052		0x1040, 0x1048, 0x1080, 0x10a4,
7053		0x1400, 0x1490, 0x1498, 0x14f0,
7054		0x1500, 0x155c, 0x1580, 0x15dc,
7055		0x1600, 0x1658, 0x1680, 0x16d8,
7056		0x1800, 0x1820, 0x1840, 0x1854,
7057		0x1880, 0x1894, 0x1900, 0x1984,
7058		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7059		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7060		0x2000, 0x2030, 0x23c0, 0x2400,
7061		0x2800, 0x2820, 0x2830, 0x2850,
7062		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7063		0x3c00, 0x3c94, 0x4000, 0x4010,
7064		0x4080, 0x4090, 0x43c0, 0x4458,
7065		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7066		0x4fc0, 0x5010, 0x53c0, 0x5444,
7067		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7068		0x5fc0, 0x6000, 0x6400, 0x6428,
7069		0x6800, 0x6848, 0x684c, 0x6860,
7070		0x6888, 0x6910, 0x8000
7071	};
7072
7073	regs->version = 0;
7074
7075	memset(p, 0, BNX2_REGDUMP_LEN);
7076
7077	if (!netif_running(bp->dev))
7078		return;
7079
7080	i = 0;
7081	offset = reg_boundaries[0];
7082	p += offset;
7083	while (offset < BNX2_REGDUMP_LEN) {
7084		*p++ = BNX2_RD(bp, offset);
7085		offset += 4;
7086		if (offset == reg_boundaries[i + 1]) {
7087			offset = reg_boundaries[i + 2];
7088			p = (u32 *) (orig_p + offset);
7089			i += 2;
7090		}
7091	}
7092}
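
/* reg_boundaries[] above is a flat list of (start, end) pairs
 * describing the readable windows of the 32 KB register space.  The
 * dump loop reads each window [start, end) and then jumps to the next
 * start, leaving the unreadable holes zero-filled by the memset above,
 * e.g. it reads 0x0000-0x0094, skips to 0x0400, reads through 0x0458,
 * and so on until offset reaches 0x8000 (BNX2_REGDUMP_LEN).
 */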
7093
7094static void
7095bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7096{
7097	struct bnx2 *bp = netdev_priv(dev);
7098
7099	if (bp->flags & BNX2_FLAG_NO_WOL) {
7100		wol->supported = 0;
7101		wol->wolopts = 0;
7102	}
7103	else {
7104		wol->supported = WAKE_MAGIC;
7105		if (bp->wol)
7106			wol->wolopts = WAKE_MAGIC;
7107		else
7108			wol->wolopts = 0;
7109	}
7110	memset(&wol->sopass, 0, sizeof(wol->sopass));
7111}
7112
7113static int
7114bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7115{
7116	struct bnx2 *bp = netdev_priv(dev);
7117
7118	if (wol->wolopts & ~WAKE_MAGIC)
7119		return -EINVAL;
7120
7121	if (wol->wolopts & WAKE_MAGIC) {
7122		if (bp->flags & BNX2_FLAG_NO_WOL)
7123			return -EINVAL;
7124
7125		bp->wol = 1;
7126	}
7127	else {
7128		bp->wol = 0;
7129	}
7130
7131	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7132
7133	return 0;
7134}
7135
7136static int
7137bnx2_nway_reset(struct net_device *dev)
7138{
7139	struct bnx2 *bp = netdev_priv(dev);
7140	u32 bmcr;
7141
7142	if (!netif_running(dev))
7143		return -EAGAIN;
7144
7145	if (!(bp->autoneg & AUTONEG_SPEED)) {
7146		return -EINVAL;
7147	}
7148
7149	spin_lock_bh(&bp->phy_lock);
7150
7151	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7152		int rc;
7153
7154		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7155		spin_unlock_bh(&bp->phy_lock);
7156		return rc;
7157	}
7158
7159	/* Force a link-down event that is visible on the other side */
7160	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7161		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7162		spin_unlock_bh(&bp->phy_lock);
7163
7164		msleep(20);
7165
7166		spin_lock_bh(&bp->phy_lock);
7167
7168		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7169		bp->serdes_an_pending = 1;
7170		mod_timer(&bp->timer, jiffies + bp->current_interval);
7171	}
7172
7173	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7174	bmcr &= ~BMCR_LOOPBACK;
7175	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7176
7177	spin_unlock_bh(&bp->phy_lock);
7178
7179	return 0;
7180}
7181
7182static u32
7183bnx2_get_link(struct net_device *dev)
7184{
7185	struct bnx2 *bp = netdev_priv(dev);
7186
7187	return bp->link_up;
7188}
7189
7190static int
7191bnx2_get_eeprom_len(struct net_device *dev)
7192{
7193	struct bnx2 *bp = netdev_priv(dev);
7194
7195	if (!bp->flash_info)
7196		return 0;
7197
7198	return (int) bp->flash_size;
7199}
7200
7201static int
7202bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7203		u8 *eebuf)
7204{
7205	struct bnx2 *bp = netdev_priv(dev);
7206	int rc;
7207
7208	/* parameters already validated in ethtool_get_eeprom */
7209
7210	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7211
7212	return rc;
7213}
7214
7215static int
7216bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7217		u8 *eebuf)
7218{
7219	struct bnx2 *bp = netdev_priv(dev);
7220	int rc;
7221
7222	/* parameters already validated in ethtool_set_eeprom */
7223
7224	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7225
7226	return rc;
7227}
7228
7229static int bnx2_get_coalesce(struct net_device *dev,
7230			     struct ethtool_coalesce *coal,
7231			     struct kernel_ethtool_coalesce *kernel_coal,
7232			     struct netlink_ext_ack *extack)
7233{
7234	struct bnx2 *bp = netdev_priv(dev);
7235
7236	memset(coal, 0, sizeof(struct ethtool_coalesce));
7237
7238	coal->rx_coalesce_usecs = bp->rx_ticks;
7239	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7240	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7241	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7242
7243	coal->tx_coalesce_usecs = bp->tx_ticks;
7244	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7245	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7246	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7247
7248	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7249
7250	return 0;
7251}
7252
7253static int bnx2_set_coalesce(struct net_device *dev,
7254			     struct ethtool_coalesce *coal,
7255			     struct kernel_ethtool_coalesce *kernel_coal,
7256			     struct netlink_ext_ack *extack)
7257{
7258	struct bnx2 *bp = netdev_priv(dev);
7259
7260	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7261	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7262
7263	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7264	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7265
7266	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7267	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7268
7269	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7270	if (bp->rx_quick_cons_trip_int > 0xff)
7271		bp->rx_quick_cons_trip_int = 0xff;
7272
7273	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7274	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7275
7276	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7277	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7278
7279	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7280	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7281
7282	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7283	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7284		0xff;
7285
7286	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7287	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7288		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7289			bp->stats_ticks = USEC_PER_SEC;
7290	}
7291	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7292		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7293	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7294
7295	if (netif_running(bp->dev)) {
7296		bnx2_netif_stop(bp, true);
7297		bnx2_init_nic(bp, 0);
7298		bnx2_netif_start(bp, true);
7299	}
7300
7301	return 0;
7302}
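
/* The clamping above caps tick values at 10 bits (0x3ff microseconds)
 * and frame-count trips at 8 bits (0xff frames), which appear to be
 * the widths of the corresponding host-coalescing hardware fields;
 * out-of-range ethtool requests are silently capped rather than
 * rejected with -EINVAL.
 */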
7303
7304static void
7305bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7306		   struct kernel_ethtool_ringparam *kernel_ering,
7307		   struct netlink_ext_ack *extack)
7308{
7309	struct bnx2 *bp = netdev_priv(dev);
7310
7311	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7312	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7313
7314	ering->rx_pending = bp->rx_ring_size;
7315	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7316
7317	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7318	ering->tx_pending = bp->tx_ring_size;
7319}
7320
7321static int
7322bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7323{
7324	if (netif_running(bp->dev)) {
7325		/* Reset will erase chipset stats; save them */
7326		bnx2_save_stats(bp);
7327
7328		bnx2_netif_stop(bp, true);
7329		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7330		if (reset_irq) {
7331			bnx2_free_irq(bp);
7332			bnx2_del_napi(bp);
7333		} else {
7334			__bnx2_free_irq(bp);
7335		}
7336		bnx2_free_skbs(bp);
7337		bnx2_free_mem(bp);
7338	}
7339
7340	bnx2_set_rx_ring_size(bp, rx);
7341	bp->tx_ring_size = tx;
7342
7343	if (netif_running(bp->dev)) {
7344		int rc = 0;
7345
7346		if (reset_irq) {
7347			rc = bnx2_setup_int_mode(bp, disable_msi);
7348			bnx2_init_napi(bp);
7349		}
7350
7351		if (!rc)
7352			rc = bnx2_alloc_mem(bp);
7353
7354		if (!rc)
7355			rc = bnx2_request_irq(bp);
7356
7357		if (!rc)
7358			rc = bnx2_init_nic(bp, 0);
7359
7360		if (rc) {
7361			bnx2_napi_enable(bp);
7362			dev_close(bp->dev);
7363			return rc;
7364		}
7365#ifdef BCM_CNIC
7366		mutex_lock(&bp->cnic_lock);
7367		/* Let cnic know about the new status block. */
7368		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7369			bnx2_setup_cnic_irq_info(bp);
7370		mutex_unlock(&bp->cnic_lock);
7371#endif
7372		bnx2_netif_start(bp, true);
7373	}
7374	return 0;
7375}
7376
7377static int
7378bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
7379		   struct kernel_ethtool_ringparam *kernel_ering,
7380		   struct netlink_ext_ack *extack)
7381{
7382	struct bnx2 *bp = netdev_priv(dev);
7383	int rc;
7384
7385	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7386		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7387		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7388
7389		return -EINVAL;
7390	}
7391	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7392				   false);
7393	return rc;
7394}
7395
7396static void
7397bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7398{
7399	struct bnx2 *bp = netdev_priv(dev);
7400
7401	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7402	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7403	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7404}
7405
7406static int
7407bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7408{
7409	struct bnx2 *bp = netdev_priv(dev);
7410
7411	bp->req_flow_ctrl = 0;
7412	if (epause->rx_pause)
7413		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7414	if (epause->tx_pause)
7415		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7416
7417	if (epause->autoneg) {
7418		bp->autoneg |= AUTONEG_FLOW_CTRL;
7419	}
7420	else {
7421		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7422	}
7423
7424	if (netif_running(dev)) {
7425		spin_lock_bh(&bp->phy_lock);
7426		bnx2_setup_phy(bp, bp->phy_port);
7427		spin_unlock_bh(&bp->phy_lock);
7428	}
7429
7430	return 0;
7431}
7432
7433static struct {
7434	char string[ETH_GSTRING_LEN];
7435} bnx2_stats_str_arr[] = {
7436	{ "rx_bytes" },
7437	{ "rx_error_bytes" },
7438	{ "tx_bytes" },
7439	{ "tx_error_bytes" },
7440	{ "rx_ucast_packets" },
7441	{ "rx_mcast_packets" },
7442	{ "rx_bcast_packets" },
7443	{ "tx_ucast_packets" },
7444	{ "tx_mcast_packets" },
7445	{ "tx_bcast_packets" },
7446	{ "tx_mac_errors" },
7447	{ "tx_carrier_errors" },
7448	{ "rx_crc_errors" },
7449	{ "rx_align_errors" },
7450	{ "tx_single_collisions" },
7451	{ "tx_multi_collisions" },
7452	{ "tx_deferred" },
7453	{ "tx_excess_collisions" },
7454	{ "tx_late_collisions" },
7455	{ "tx_total_collisions" },
7456	{ "rx_fragments" },
7457	{ "rx_jabbers" },
7458	{ "rx_undersize_packets" },
7459	{ "rx_oversize_packets" },
7460	{ "rx_64_byte_packets" },
7461	{ "rx_65_to_127_byte_packets" },
7462	{ "rx_128_to_255_byte_packets" },
7463	{ "rx_256_to_511_byte_packets" },
7464	{ "rx_512_to_1023_byte_packets" },
7465	{ "rx_1024_to_1522_byte_packets" },
7466	{ "rx_1523_to_9022_byte_packets" },
7467	{ "tx_64_byte_packets" },
7468	{ "tx_65_to_127_byte_packets" },
7469	{ "tx_128_to_255_byte_packets" },
7470	{ "tx_256_to_511_byte_packets" },
7471	{ "tx_512_to_1023_byte_packets" },
7472	{ "tx_1024_to_1522_byte_packets" },
7473	{ "tx_1523_to_9022_byte_packets" },
7474	{ "rx_xon_frames" },
7475	{ "rx_xoff_frames" },
7476	{ "tx_xon_frames" },
7477	{ "tx_xoff_frames" },
7478	{ "rx_mac_ctrl_frames" },
7479	{ "rx_filtered_packets" },
7480	{ "rx_ftq_discards" },
7481	{ "rx_discards" },
7482	{ "rx_fw_discards" },
7483};
7484
7485#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7486
7487#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
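
/* STATS_OFFSET32() converts a byte offset inside struct
 * statistics_block into a u32-word index, matching how
 * bnx2_get_ethtool_stats() below walks the block as a u32 array:
 *
 *	STATS_OFFSET32(stat_IfHCInOctets_hi)
 *	    == offsetof(struct statistics_block, stat_IfHCInOctets_hi) / 4
 */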
7488
7489static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7490    STATS_OFFSET32(stat_IfHCInOctets_hi),
7491    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7492    STATS_OFFSET32(stat_IfHCOutOctets_hi),
7493    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7494    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7495    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7496    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7497    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7498    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7499    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7500    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7501    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7502    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7503    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7504    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7505    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7506    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7507    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7508    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7509    STATS_OFFSET32(stat_EtherStatsCollisions),
7510    STATS_OFFSET32(stat_EtherStatsFragments),
7511    STATS_OFFSET32(stat_EtherStatsJabbers),
7512    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7513    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7514    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7515    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7516    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7517    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7518    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7519    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7520    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7521    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7522    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7523    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7524    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7525    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7526    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7527    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7528    STATS_OFFSET32(stat_XonPauseFramesReceived),
7529    STATS_OFFSET32(stat_XoffPauseFramesReceived),
7530    STATS_OFFSET32(stat_OutXonSent),
7531    STATS_OFFSET32(stat_OutXoffSent),
7532    STATS_OFFSET32(stat_MacControlFramesReceived),
7533    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7534    STATS_OFFSET32(stat_IfInFTQDiscards),
7535    STATS_OFFSET32(stat_IfInMBUFDiscards),
7536    STATS_OFFSET32(stat_FwRxDrop),
7537};
7538
7539/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7540 * skipped because of errata.
7541 */
7542static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7543	8,0,8,8,8,8,8,8,8,8,
7544	4,0,4,4,4,4,4,4,4,4,
7545	4,4,4,4,4,4,4,4,4,4,
7546	4,4,4,4,4,4,4,4,4,4,
7547	4,4,4,4,4,4,4,
7548};
7549
7550static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7551	8,0,8,8,8,8,8,8,8,8,
7552	4,4,4,4,4,4,4,4,4,4,
7553	4,4,4,4,4,4,4,4,4,4,
7554	4,4,4,4,4,4,4,4,4,4,
7555	4,4,4,4,4,4,4,
7556};
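
/* In the two length tables above, 8 marks a 64-bit counter (a hi/lo
 * u32 pair), 4 a 32-bit counter, and 0 a counter that
 * bnx2_get_ethtool_stats() must report as zero because the hardware
 * value is unreliable on that chip, per the errata comment above.
 */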
7557
7558#define BNX2_NUM_TESTS 6
7559
7560static struct {
7561	char string[ETH_GSTRING_LEN];
7562} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7563	{ "register_test (offline)" },
7564	{ "memory_test (offline)" },
7565	{ "loopback_test (offline)" },
7566	{ "nvram_test (online)" },
7567	{ "interrupt_test (online)" },
7568	{ "link_test (online)" },
7569};
7570
7571static int
7572bnx2_get_sset_count(struct net_device *dev, int sset)
7573{
7574	switch (sset) {
7575	case ETH_SS_TEST:
7576		return BNX2_NUM_TESTS;
7577	case ETH_SS_STATS:
7578		return BNX2_NUM_STATS;
7579	default:
7580		return -EOPNOTSUPP;
7581	}
7582}
7583
7584static void
7585bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7586{
7587	struct bnx2 *bp = netdev_priv(dev);
7588
7589	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7590	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7591		int i;
7592
7593		bnx2_netif_stop(bp, true);
7594		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7595		bnx2_free_skbs(bp);
7596
7597		if (bnx2_test_registers(bp) != 0) {
7598			buf[0] = 1;
7599			etest->flags |= ETH_TEST_FL_FAILED;
7600		}
7601		if (bnx2_test_memory(bp) != 0) {
7602			buf[1] = 1;
7603			etest->flags |= ETH_TEST_FL_FAILED;
7604		}
7605		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7606			etest->flags |= ETH_TEST_FL_FAILED;
7607
7608		if (!netif_running(bp->dev))
7609			bnx2_shutdown_chip(bp);
7610		else {
7611			bnx2_init_nic(bp, 1);
7612			bnx2_netif_start(bp, true);
7613		}
7614
7615		/* wait for link up */
7616		for (i = 0; i < 7; i++) {
7617			if (bp->link_up)
7618				break;
7619			msleep_interruptible(1000);
7620		}
7621	}
7622
7623	if (bnx2_test_nvram(bp) != 0) {
7624		buf[3] = 1;
7625		etest->flags |= ETH_TEST_FL_FAILED;
7626	}
7627	if (bnx2_test_intr(bp) != 0) {
7628		buf[4] = 1;
7629		etest->flags |= ETH_TEST_FL_FAILED;
7630	}
7631
7632	if (bnx2_test_link(bp) != 0) {
7633		buf[5] = 1;
7634		etest->flags |= ETH_TEST_FL_FAILED;
7635
7636	}
7637}
7638
7639static void
7640bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7641{
7642	switch (stringset) {
7643	case ETH_SS_STATS:
7644		memcpy(buf, bnx2_stats_str_arr,
7645			sizeof(bnx2_stats_str_arr));
7646		break;
7647	case ETH_SS_TEST:
7648		memcpy(buf, bnx2_tests_str_arr,
7649			sizeof(bnx2_tests_str_arr));
7650		break;
7651	}
7652}
7653
7654static void
7655bnx2_get_ethtool_stats(struct net_device *dev,
7656		struct ethtool_stats *stats, u64 *buf)
7657{
7658	struct bnx2 *bp = netdev_priv(dev);
7659	int i;
7660	u32 *hw_stats = (u32 *) bp->stats_blk;
7661	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7662	u8 *stats_len_arr = NULL;
7663
7664	if (!hw_stats) {
7665		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7666		return;
7667	}
7668
7669	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7670	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7671	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7672	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7673		stats_len_arr = bnx2_5706_stats_len_arr;
7674	else
7675		stats_len_arr = bnx2_5708_stats_len_arr;
7676
7677	for (i = 0; i < BNX2_NUM_STATS; i++) {
7678		unsigned long offset;
7679
7680		if (stats_len_arr[i] == 0) {
7681			/* skip this counter */
7682			buf[i] = 0;
7683			continue;
7684		}
7685
7686		offset = bnx2_stats_offset_arr[i];
7687		if (stats_len_arr[i] == 4) {
7688			/* 4-byte counter */
7689			buf[i] = (u64) *(hw_stats + offset) +
7690				 *(temp_stats + offset);
7691			continue;
7692		}
7693		/* 8-byte counter */
7694		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7695			 *(hw_stats + offset + 1) +
7696			 (((u64) *(temp_stats + offset)) << 32) +
7697			 *(temp_stats + offset + 1);
7698	}
7699}
7700
7701static int
7702bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7703{
7704	struct bnx2 *bp = netdev_priv(dev);
7705
7706	switch (state) {
7707	case ETHTOOL_ID_ACTIVE:
7708		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7709		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7710		return 1;	/* cycle on/off once per second */
7711
7712	case ETHTOOL_ID_ON:
7713		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7714			BNX2_EMAC_LED_1000MB_OVERRIDE |
7715			BNX2_EMAC_LED_100MB_OVERRIDE |
7716			BNX2_EMAC_LED_10MB_OVERRIDE |
7717			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7718			BNX2_EMAC_LED_TRAFFIC);
7719		break;
7720
7721	case ETHTOOL_ID_OFF:
7722		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7723		break;
7724
7725	case ETHTOOL_ID_INACTIVE:
7726		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7727		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7728		break;
7729	}
7730
7731	return 0;
7732}
7733
7734static int
7735bnx2_set_features(struct net_device *dev, netdev_features_t features)
7736{
7737	struct bnx2 *bp = netdev_priv(dev);
7738
7739	/* TSO with VLAN tag won't work with current firmware */
7740	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7741		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7742	else
7743		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7744
7745	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7746	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7747	    netif_running(dev)) {
7748		bnx2_netif_stop(bp, false);
7749		dev->features = features;
7750		bnx2_set_rx_mode(dev);
7751		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7752		bnx2_netif_start(bp, false);
7753		return 1;
7754	}
7755
7756	return 0;
7757}
7758
7759static void bnx2_get_channels(struct net_device *dev,
7760			      struct ethtool_channels *channels)
7761{
7762	struct bnx2 *bp = netdev_priv(dev);
7763	u32 max_rx_rings = 1;
7764	u32 max_tx_rings = 1;
7765
7766	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7767		max_rx_rings = RX_MAX_RINGS;
7768		max_tx_rings = TX_MAX_RINGS;
7769	}
7770
7771	channels->max_rx = max_rx_rings;
7772	channels->max_tx = max_tx_rings;
7773	channels->max_other = 0;
7774	channels->max_combined = 0;
7775	channels->rx_count = bp->num_rx_rings;
7776	channels->tx_count = bp->num_tx_rings;
7777	channels->other_count = 0;
7778	channels->combined_count = 0;
7779}
7780
7781static int bnx2_set_channels(struct net_device *dev,
7782			      struct ethtool_channels *channels)
7783{
7784	struct bnx2 *bp = netdev_priv(dev);
7785	u32 max_rx_rings = 1;
7786	u32 max_tx_rings = 1;
7787	int rc = 0;
7788
7789	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7790		max_rx_rings = RX_MAX_RINGS;
7791		max_tx_rings = TX_MAX_RINGS;
7792	}
7793	if (channels->rx_count > max_rx_rings ||
7794	    channels->tx_count > max_tx_rings)
7795		return -EINVAL;
7796
7797	bp->num_req_rx_rings = channels->rx_count;
7798	bp->num_req_tx_rings = channels->tx_count;
7799
7800	if (netif_running(dev))
7801		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7802					   bp->tx_ring_size, true);
7803
7804	return rc;
7805}
7806
7807static const struct ethtool_ops bnx2_ethtool_ops = {
7808	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
7809				     ETHTOOL_COALESCE_MAX_FRAMES |
7810				     ETHTOOL_COALESCE_USECS_IRQ |
7811				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
7812				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
7813	.get_drvinfo		= bnx2_get_drvinfo,
7814	.get_regs_len		= bnx2_get_regs_len,
7815	.get_regs		= bnx2_get_regs,
7816	.get_wol		= bnx2_get_wol,
7817	.set_wol		= bnx2_set_wol,
7818	.nway_reset		= bnx2_nway_reset,
7819	.get_link		= bnx2_get_link,
7820	.get_eeprom_len		= bnx2_get_eeprom_len,
7821	.get_eeprom		= bnx2_get_eeprom,
7822	.set_eeprom		= bnx2_set_eeprom,
7823	.get_coalesce		= bnx2_get_coalesce,
7824	.set_coalesce		= bnx2_set_coalesce,
7825	.get_ringparam		= bnx2_get_ringparam,
7826	.set_ringparam		= bnx2_set_ringparam,
7827	.get_pauseparam		= bnx2_get_pauseparam,
7828	.set_pauseparam		= bnx2_set_pauseparam,
7829	.self_test		= bnx2_self_test,
7830	.get_strings		= bnx2_get_strings,
7831	.set_phys_id		= bnx2_set_phys_id,
7832	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7833	.get_sset_count		= bnx2_get_sset_count,
7834	.get_channels		= bnx2_get_channels,
7835	.set_channels		= bnx2_set_channels,
7836	.get_link_ksettings	= bnx2_get_link_ksettings,
7837	.set_link_ksettings	= bnx2_set_link_ksettings,
7838};
7839
7840/* Called with rtnl_lock */
7841static int
7842bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7843{
7844	struct mii_ioctl_data *data = if_mii(ifr);
7845	struct bnx2 *bp = netdev_priv(dev);
7846	int err;
7847
7848	switch(cmd) {
7849	case SIOCGMIIPHY:
7850		data->phy_id = bp->phy_addr;
7851
7852		fallthrough;
7853	case SIOCGMIIREG: {
7854		u32 mii_regval;
7855
7856		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7857			return -EOPNOTSUPP;
7858
7859		if (!netif_running(dev))
7860			return -EAGAIN;
7861
7862		spin_lock_bh(&bp->phy_lock);
7863		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7864		spin_unlock_bh(&bp->phy_lock);
7865
7866		data->val_out = mii_regval;
7867
7868		return err;
7869	}
7870
7871	case SIOCSMIIREG:
7872		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7873			return -EOPNOTSUPP;
7874
7875		if (!netif_running(dev))
7876			return -EAGAIN;
7877
7878		spin_lock_bh(&bp->phy_lock);
7879		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7880		spin_unlock_bh(&bp->phy_lock);
7881
7882		return err;
7883
7884	default:
7885		/* do nothing */
7886		break;
7887	}
7888	return -EOPNOTSUPP;
7889}
7890
7891/* Called with rtnl_lock */
7892static int
7893bnx2_change_mac_addr(struct net_device *dev, void *p)
7894{
7895	struct sockaddr *addr = p;
7896	struct bnx2 *bp = netdev_priv(dev);
7897
7898	if (!is_valid_ether_addr(addr->sa_data))
7899		return -EADDRNOTAVAIL;
7900
7901	eth_hw_addr_set(dev, addr->sa_data);
7902	if (netif_running(dev))
7903		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7904
7905	return 0;
7906}
7907
7908/* Called with rtnl_lock */
7909static int
7910bnx2_change_mtu(struct net_device *dev, int new_mtu)
7911{
7912	struct bnx2 *bp = netdev_priv(dev);
7913
7914	dev->mtu = new_mtu;
7915	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7916				     false);
7917}
7918
7919#ifdef CONFIG_NET_POLL_CONTROLLER
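/* netpoll hook: interrupts may not be serviced in this context, so call
 * each vector's handler directly with its IRQ masked, allowing users
 * such as netconsole to make progress.
 */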
7920static void
7921poll_bnx2(struct net_device *dev)
7922{
7923	struct bnx2 *bp = netdev_priv(dev);
7924	int i;
7925
7926	for (i = 0; i < bp->irq_nvecs; i++) {
7927		struct bnx2_irq *irq = &bp->irq_tbl[i];
7928
7929		disable_irq(irq->vector);
7930		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7931		enable_irq(irq->vector);
7932	}
7933}
7934#endif
7935
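/* The 5709 is a dual-media part.  Decide whether this port is SerDes or
 * copper from the bond ID, or, for dual-media bonds, from the strap
 * value (possibly overridden), which maps differently for function 0
 * and function 1.
 */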
7936static void
7937bnx2_get_5709_media(struct bnx2 *bp)
7938{
7939	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7940	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7941	u32 strap;
7942
7943	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7944		return;
7945	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7946		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7947		return;
7948	}
7949
7950	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7951		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7952	else
7953		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7954
7955	if (bp->func == 0) {
7956		switch (strap) {
7957		case 0x4:
7958		case 0x5:
7959		case 0x6:
7960			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7961			return;
7962		}
7963	} else {
7964		switch (strap) {
7965		case 0x1:
7966		case 0x2:
7967		case 0x4:
7968			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7969			return;
7970		}
7971	}
7972}
7973
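/* Work out the bus type (PCI vs. PCI-X), clock speed and bus width from
 * the detection bits latched in the PCICFG misc status and clock
 * control registers.
 */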
7974static void
7975bnx2_get_pci_speed(struct bnx2 *bp)
7976{
7977	u32 reg;
7978
7979	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7980	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7981		u32 clkreg;
7982
7983		bp->flags |= BNX2_FLAG_PCIX;
7984
7985		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7986
7987		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7988		switch (clkreg) {
7989		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7990			bp->bus_speed_mhz = 133;
7991			break;
7992
7993		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7994			bp->bus_speed_mhz = 100;
7995			break;
7996
7997		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7998		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7999			bp->bus_speed_mhz = 66;
8000			break;
8001
8002		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8003		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8004			bp->bus_speed_mhz = 50;
8005			break;
8006
8007		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8008		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8009		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8010			bp->bus_speed_mhz = 33;
8011			break;
8012		}
8013	} else {
8015		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8016			bp->bus_speed_mhz = 66;
8017		else
8018			bp->bus_speed_mhz = 33;
8019	}
8020
8021	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8022		bp->flags |= BNX2_FLAG_PCI_32BIT;
8023
8024}
8025
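/* Read the Vital Product Data image from NVRAM and, when the VPD
 * manufacturer ID reads "1028" (Dell), record the vendor firmware
 * version (read-only keyword V0) in bp->fw_version.  The image is
 * stored byte-swapped in NVRAM, hence the swab32s() pass.
 */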
8026static void
8027bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8028{
8029	unsigned int len;
8030	int rc, i, j;
8031	u8 *data;
8032
8033#define BNX2_VPD_NVRAM_OFFSET	0x300
8034#define BNX2_VPD_LEN		128
8035#define BNX2_MAX_VER_SLEN	30
8036
8037	data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL);
8038	if (!data)
8039		return;
8040
8041	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
8042	if (rc)
8043		goto vpd_done;
8044
8045	for (i = 0; i < BNX2_VPD_LEN; i += 4)
8046		swab32s((u32 *)&data[i]);
8047
8048	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8049					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
8050	if (j < 0)
8051		goto vpd_done;
8052
8053	if (len != 4 || memcmp(&data[j], "1028", 4))
8054		goto vpd_done;
8055
8056	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
8057					 PCI_VPD_RO_KEYWORD_VENDOR0,
8058					 &len);
8059	if (j < 0)
8060		goto vpd_done;
8061
8062	if (len > BNX2_MAX_VER_SLEN)
8063		goto vpd_done;
8064
8065	memcpy(bp->fw_version, &data[j], len);
8066	bp->fw_version[len] = ' ';
8067
8068vpd_done:
8069	kfree(data);
8070}
8071
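/* One-time board setup at probe time: enable and map the PCI device,
 * size the DMA masks (40-bit on the 5708, 64-bit otherwise), detect the
 * chip variant and its quirks, read the permanent MAC address and
 * firmware versions from shared memory, and establish default
 * coalescing, ring and link parameters.
 */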
8072static int
8073bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8074{
8075	struct bnx2 *bp;
8076	int rc, i, j;
8077	u32 reg;
8078	u64 dma_mask, persist_dma_mask;
8079
8080	SET_NETDEV_DEV(dev, &pdev->dev);
8081	bp = netdev_priv(dev);
8082
8083	bp->flags = 0;
8084	bp->phy_flags = 0;
8085
8086	bp->temp_stats_blk =
8087		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8088
8089	if (!bp->temp_stats_blk) {
8090		rc = -ENOMEM;
8091		goto err_out;
8092	}
8093
8094	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8095	rc = pci_enable_device(pdev);
8096	if (rc) {
8097		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8098		goto err_out;
8099	}
8100
8101	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8102		dev_err(&pdev->dev,
8103			"Cannot find PCI device base address, aborting\n");
8104		rc = -ENODEV;
8105		goto err_out_disable;
8106	}
8107
8108	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8109	if (rc) {
8110		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8111		goto err_out_disable;
8112	}
8113
8114	pci_set_master(pdev);
8115
8116	bp->pm_cap = pdev->pm_cap;
8117	if (bp->pm_cap == 0) {
8118		dev_err(&pdev->dev,
8119			"Cannot find power management capability, aborting\n");
8120		rc = -EIO;
8121		goto err_out_release;
8122	}
8123
8124	bp->dev = dev;
8125	bp->pdev = pdev;
8126
8127	spin_lock_init(&bp->phy_lock);
8128	spin_lock_init(&bp->indirect_lock);
8129#ifdef BCM_CNIC
8130	mutex_init(&bp->cnic_lock);
8131#endif
8132	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8133
8134	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8135							 TX_MAX_TSS_RINGS + 1));
8136	if (!bp->regview) {
8137		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8138		rc = -ENOMEM;
8139		goto err_out_release;
8140	}
8141
8142	/* Configure byte swap and enable write to the reg_window registers.
8143	 * Rely on the CPU to do target byte swapping on big endian systems;
8144	 * the chip's target access swapping will not swap all accesses.
8145	 */
8146	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8147		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8148		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8149
8150	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8151
8152	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8153		if (!pci_is_pcie(pdev)) {
8154			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8155			rc = -EIO;
8156			goto err_out_unmap;
8157		}
8158		bp->flags |= BNX2_FLAG_PCIE;
8159		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8160			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8161	} else {
8162		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8163		if (bp->pcix_cap == 0) {
8164			dev_err(&pdev->dev,
8165				"Cannot find PCIX capability, aborting\n");
8166			rc = -EIO;
8167			goto err_out_unmap;
8168		}
8169		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8170	}
8171
8172	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8173	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8174		if (pdev->msix_cap)
8175			bp->flags |= BNX2_FLAG_MSIX_CAP;
8176	}
8177
8178	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8179	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8180		if (pdev->msi_cap)
8181			bp->flags |= BNX2_FLAG_MSI_CAP;
8182	}
8183
8184	/* 5708 cannot support DMA addresses > 40-bit.  */
8185	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8186		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8187	else
8188		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8189
8190	/* Configure DMA attributes. */
8191	if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
8192		dev->features |= NETIF_F_HIGHDMA;
8193		rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
8194		if (rc) {
8195			dev_err(&pdev->dev,
8196				"dma_set_coherent_mask failed, aborting\n");
8197			goto err_out_unmap;
8198		}
8199	} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
8200		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8201		goto err_out_unmap;
8202	}
8203
8204	if (!(bp->flags & BNX2_FLAG_PCIE))
8205		bnx2_get_pci_speed(bp);
8206
8207	/* 5706A0 may falsely detect SERR and PERR. */
8208	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8209		reg = BNX2_RD(bp, PCI_COMMAND);
8210		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8211		BNX2_WR(bp, PCI_COMMAND, reg);
8212	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8213		!(bp->flags & BNX2_FLAG_PCIX)) {
8214		dev_err(&pdev->dev,
8215			"5706 A1 can only be used in a PCIX bus, aborting\n");
8216		rc = -EPERM;
8217		goto err_out_unmap;
8218	}
8219
8220	bnx2_init_nvram(bp);
8221
8222	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8223
8224	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8225		bp->func = 1;
8226
8227	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8228	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8229		u32 off = bp->func << 2;
8230
8231		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8232	} else
8233		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8234
8235	/* Get the permanent MAC address.  First we need to make sure the
8236	 * firmware is actually running.
8237	 */
8238	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8239
8240	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8241	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8242		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8243		rc = -ENODEV;
8244		goto err_out_unmap;
8245	}
8246
8247	bnx2_read_vpd_fw_ver(bp);
8248
8249	j = strlen(bp->fw_version);
8250	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8251	for (i = 0; i < 3 && j < 24; i++) {
8252		u8 num, k, skip0;
8253
8254		if (i == 0) {
8255			bp->fw_version[j++] = 'b';
8256			bp->fw_version[j++] = 'c';
8257			bp->fw_version[j++] = ' ';
8258		}
8259		num = (u8) (reg >> (24 - (i * 8)));
8260		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8261			if (num >= k || !skip0 || k == 1) {
8262				bp->fw_version[j++] = (num / k) + '0';
8263				skip0 = 0;
8264			}
8265		}
8266		if (i != 2)
8267			bp->fw_version[j++] = '.';
8268	}
8269	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8270	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8271		bp->wol = 1;
8272
8273	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8274		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8275
8276		for (i = 0; i < 30; i++) {
8277			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8278			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8279				break;
8280			msleep(10);
8281		}
8282	}
8283	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8284	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8285	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8286	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8287		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8288
8289		if (j < 32)
8290			bp->fw_version[j++] = ' ';
8291		for (i = 0; i < 3 && j < 28; i++) {
8292			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8293			reg = be32_to_cpu(reg);
8294			memcpy(&bp->fw_version[j], &reg, 4);
8295			j += 4;
8296		}
8297	}
8298
8299	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8300	bp->mac_addr[0] = (u8) (reg >> 8);
8301	bp->mac_addr[1] = (u8) reg;
8302
8303	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8304	bp->mac_addr[2] = (u8) (reg >> 24);
8305	bp->mac_addr[3] = (u8) (reg >> 16);
8306	bp->mac_addr[4] = (u8) (reg >> 8);
8307	bp->mac_addr[5] = (u8) reg;
8308
8309	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8310	bnx2_set_rx_ring_size(bp, 255);
8311
8312	bp->tx_quick_cons_trip_int = 2;
8313	bp->tx_quick_cons_trip = 20;
8314	bp->tx_ticks_int = 18;
8315	bp->tx_ticks = 80;
8316
8317	bp->rx_quick_cons_trip_int = 2;
8318	bp->rx_quick_cons_trip = 12;
8319	bp->rx_ticks_int = 18;
8320	bp->rx_ticks = 18;
8321
8322	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8323
8324	bp->current_interval = BNX2_TIMER_INTERVAL;
8325
8326	bp->phy_addr = 1;
8327
8328	/* allocate stats_blk */
8329	rc = bnx2_alloc_stats_blk(dev);
8330	if (rc)
8331		goto err_out_unmap;
8332
8333	/* Determine the media type.  WOL is disabled below for SERDES
	 * configurations that cannot keep link on VAUX power.
	 */
8334	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8335		bnx2_get_5709_media(bp);
8336	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8337		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8338
8339	bp->phy_port = PORT_TP;
8340	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8341		bp->phy_port = PORT_FIBRE;
8342		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8343		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8344			bp->flags |= BNX2_FLAG_NO_WOL;
8345			bp->wol = 0;
8346		}
8347		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8348			/* Don't do parallel detect on this board because of
8349			 * some board problems.  The link will not go down
8350			 * if we do parallel detect.
8351			 */
8352			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8353			    pdev->subsystem_device == 0x310c)
8354				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8355		} else {
8356			bp->phy_addr = 2;
8357			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8358				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8359		}
8360	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8361		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8362		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8363	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8364		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8365		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8366		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8367
8368	bnx2_init_fw_cap(bp);
8369
8370	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8371	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8372	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8373	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8374		bp->flags |= BNX2_FLAG_NO_WOL;
8375		bp->wol = 0;
8376	}
8377
8378	if (bp->flags & BNX2_FLAG_NO_WOL)
8379		device_set_wakeup_capable(&bp->pdev->dev, false);
8380	else
8381		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8382
8383	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8384		bp->tx_quick_cons_trip_int =
8385			bp->tx_quick_cons_trip;
8386		bp->tx_ticks_int = bp->tx_ticks;
8387		bp->rx_quick_cons_trip_int =
8388			bp->rx_quick_cons_trip;
8389		bp->rx_ticks_int = bp->rx_ticks;
8390		bp->comp_prod_trip_int = bp->comp_prod_trip;
8391		bp->com_ticks_int = bp->com_ticks;
8392		bp->cmd_ticks_int = bp->cmd_ticks;
8393	}
8394
8395	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8396	 *
8397 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
8398	 * with byte enables disabled on the unused 32-bit word.  This is legal
8399	 * but causes problems on the AMD 8132 which will eventually stop
8400	 * responding after a while.
8401	 *
8402	 * AMD believes this incompatibility is unique to the 5706, and
8403	 * prefers to locally disable MSI rather than globally disabling it.
8404	 */
8405	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8406		struct pci_dev *amd_8132 = NULL;
8407
8408		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8409						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8410						  amd_8132))) {
8411
8412			if (amd_8132->revision >= 0x10 &&
8413			    amd_8132->revision <= 0x13) {
8414				disable_msi = 1;
8415				pci_dev_put(amd_8132);
8416				break;
8417			}
8418		}
8419	}
8420
8421	bnx2_set_default_link(bp);
8422	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8423
8424	timer_setup(&bp->timer, bnx2_timer, 0);
8425	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8426
8427#ifdef BCM_CNIC
8428	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8429		bp->cnic_eth_dev.max_iscsi_conn =
8430			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8431			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8432	bp->cnic_probe = bnx2_cnic_probe;
8433#endif
8434	pci_save_state(pdev);
8435
8436	return 0;
8437
8438err_out_unmap:
8439	pci_iounmap(pdev, bp->regview);
8440	bp->regview = NULL;
8441
8442err_out_release:
8443	pci_release_regions(pdev);
8444
8445err_out_disable:
8446	pci_disable_device(pdev);
8447
8448err_out:
8449	kfree(bp->temp_stats_blk);
8450
8451	return rc;
8452}
8453
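/* Format a human-readable bus description ("PCI Express", or PCI/PCI-X
 * with width and clock speed) into the caller-supplied buffer.
 */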
8454static char *
8455bnx2_bus_string(struct bnx2 *bp, char *str)
8456{
8457	char *s = str;
8458
8459	if (bp->flags & BNX2_FLAG_PCIE) {
8460		s += sprintf(s, "PCI Express");
8461	} else {
8462		s += sprintf(s, "PCI");
8463		if (bp->flags & BNX2_FLAG_PCIX)
8464			s += sprintf(s, "-X");
8465		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8466			s += sprintf(s, " 32-bit");
8467		else
8468			s += sprintf(s, " 64-bit");
8469		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8470	}
8471	return str;
8472}
8473
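/* One NAPI context is registered per IRQ vector; vector 0 uses the
 * default poller while the extra MSI-X vectors use the MSI-X poller.
 */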
8474static void
8475bnx2_del_napi(struct bnx2 *bp)
8476{
8477	int i;
8478
8479	for (i = 0; i < bp->irq_nvecs; i++)
8480		netif_napi_del(&bp->bnx2_napi[i].napi);
8481}
8482
8483static void
8484bnx2_init_napi(struct bnx2 *bp)
8485{
8486	int i;
8487
8488	for (i = 0; i < bp->irq_nvecs; i++) {
8489		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8490		int (*poll)(struct napi_struct *, int);
8491
8492		if (i == 0)
8493			poll = bnx2_poll;
8494		else
8495			poll = bnx2_poll_msix;
8496
8497		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8498		bnapi->bp = bp;
8499	}
8500}
8501
8502static const struct net_device_ops bnx2_netdev_ops = {
8503	.ndo_open		= bnx2_open,
8504	.ndo_start_xmit		= bnx2_start_xmit,
8505	.ndo_stop		= bnx2_close,
8506	.ndo_get_stats64	= bnx2_get_stats64,
8507	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8508	.ndo_eth_ioctl		= bnx2_ioctl,
8509	.ndo_validate_addr	= eth_validate_addr,
8510	.ndo_set_mac_address	= bnx2_change_mac_addr,
8511	.ndo_change_mtu		= bnx2_change_mtu,
8512	.ndo_set_features	= bnx2_set_features,
8513	.ndo_tx_timeout		= bnx2_tx_timeout,
8514#ifdef CONFIG_NET_POLL_CONTROLLER
8515	.ndo_poll_controller	= poll_bnx2,
8516#endif
8517};
8518
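/* PCI probe entry point: allocate the multi-queue netdev, run the board
 * setup, advertise the offload set (checksum, TSO and VLAN offloads,
 * plus IPv6 offloads on the 5709), and register with the network core.
 */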
8519static int
8520bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8521{
8522	struct net_device *dev;
8523	struct bnx2 *bp;
8524	int rc;
8525	char str[40];
8526
8527	/* dev zeroed in init_etherdev */
8528	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8529	if (!dev)
8530		return -ENOMEM;
8531
8532	rc = bnx2_init_board(pdev, dev);
8533	if (rc < 0)
8534		goto err_free;
8535
8536	dev->netdev_ops = &bnx2_netdev_ops;
8537	dev->watchdog_timeo = TX_TIMEOUT;
8538	dev->ethtool_ops = &bnx2_ethtool_ops;
8539
8540	bp = netdev_priv(dev);
8541
8542	pci_set_drvdata(pdev, dev);
8543
8544	/*
8545	 * In-flight DMA from the 1st kernel could continue going in the kdump
8546	 * kernel.  A new IO page table has been created before bnx2 does its
8547	 * reset at open time, so we have to wait for the in-flight DMA to
8548	 * complete to avoid it looking up the newly created IO page table.
8549	 */
8550	if (is_kdump_kernel())
8551		bnx2_wait_dma_complete(bp);
8552
8553	eth_hw_addr_set(dev, bp->mac_addr);
8554
8555	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8556		NETIF_F_TSO | NETIF_F_TSO_ECN |
8557		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8558
8559	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8560		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8561
8562	dev->vlan_features = dev->hw_features;
8563	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8564	dev->features |= dev->hw_features;
8565	dev->priv_flags |= IFF_UNICAST_FLT;
8566	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8567	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8568
8569	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8570		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8571
8572	if ((rc = register_netdev(dev))) {
8573		dev_err(&pdev->dev, "Cannot register net device\n");
8574		goto error;
8575	}
8576
8577	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8578		    "node addr %pM\n", board_info[ent->driver_data].name,
8579		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8580		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8581		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8582		    pdev->irq, dev->dev_addr);
8583
8584	return 0;
8585
8586error:
8587	pci_iounmap(pdev, bp->regview);
8588	pci_release_regions(pdev);
8589	pci_disable_device(pdev);
8590err_free:
8591	bnx2_free_stats_blk(dev);
8592	free_netdev(dev);
8593	return rc;
8594}
8595
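/* PCI remove: unwind probe in reverse order, making sure the timer and
 * the deferred reset task have stopped before freeing driver state.
 */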
8596static void
8597bnx2_remove_one(struct pci_dev *pdev)
8598{
8599	struct net_device *dev = pci_get_drvdata(pdev);
8600	struct bnx2 *bp = netdev_priv(dev);
8601
8602	unregister_netdev(dev);
8603
8604	del_timer_sync(&bp->timer);
8605	cancel_work_sync(&bp->reset_task);
8606
8607	pci_iounmap(bp->pdev, bp->regview);
8608
8609	bnx2_free_stats_blk(dev);
8610	kfree(bp->temp_stats_blk);
8611
8612	bnx2_release_firmware(bp);
8613
8614	free_netdev(dev);
8615
8616	pci_release_regions(pdev);
8617	pci_disable_device(pdev);
8618}
8619
8620#ifdef CONFIG_PM_SLEEP
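/* System sleep hooks: suspend quiesces a running NIC, releases its IRQ
 * and buffers and configures wake-on-LAN; resume powers the chip back
 * to D0 and rebuilds NIC state if the interface was up.
 */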
8621static int
8622bnx2_suspend(struct device *device)
8623{
8624	struct net_device *dev = dev_get_drvdata(device);
8625	struct bnx2 *bp = netdev_priv(dev);
8626
8627	if (netif_running(dev)) {
8628		cancel_work_sync(&bp->reset_task);
8629		bnx2_netif_stop(bp, true);
8630		netif_device_detach(dev);
8631		del_timer_sync(&bp->timer);
8632		bnx2_shutdown_chip(bp);
8633		__bnx2_free_irq(bp);
8634		bnx2_free_skbs(bp);
8635	}
8636	bnx2_setup_wol(bp);
8637	return 0;
8638}
8639
8640static int
8641bnx2_resume(struct device *device)
8642{
8643	struct net_device *dev = dev_get_drvdata(device);
8644	struct bnx2 *bp = netdev_priv(dev);
8645
8646	if (!netif_running(dev))
8647		return 0;
8648
8649	bnx2_set_power_state(bp, PCI_D0);
8650	netif_device_attach(dev);
8651	bnx2_request_irq(bp);
8652	bnx2_init_nic(bp, 1);
8653	bnx2_netif_start(bp, true);
8654	return 0;
8655}
8656
8657static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8658#define BNX2_PM_OPS (&bnx2_pm_ops)
8659
8660#else
8661
8662#define BNX2_PM_OPS NULL
8663
8664#endif /* CONFIG_PM_SLEEP */
8665/**
8666 * bnx2_io_error_detected - called when PCI error is detected
8667 * @pdev: Pointer to PCI device
8668 * @state: The current pci connection state
8669 *
8670 * This function is called after a PCI bus error affecting
8671 * this device has been detected.
8672 */
8673static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8674					       pci_channel_state_t state)
8675{
8676	struct net_device *dev = pci_get_drvdata(pdev);
8677	struct bnx2 *bp = netdev_priv(dev);
8678
8679	rtnl_lock();
8680	netif_device_detach(dev);
8681
8682	if (state == pci_channel_io_perm_failure) {
8683		rtnl_unlock();
8684		return PCI_ERS_RESULT_DISCONNECT;
8685	}
8686
8687	if (netif_running(dev)) {
8688		bnx2_netif_stop(bp, true);
8689		del_timer_sync(&bp->timer);
8690		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8691	}
8692
8693	pci_disable_device(pdev);
8694	rtnl_unlock();
8695
8696	/* Request a slot reset. */
8697	return PCI_ERS_RESULT_NEED_RESET;
8698}
8699
8700/**
8701 * bnx2_io_slot_reset - called after the pci bus has been reset.
8702 * @pdev: Pointer to PCI device
8703 *
8704 * Restart the card from scratch, as if from a cold-boot.
8705 */
8706static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8707{
8708	struct net_device *dev = pci_get_drvdata(pdev);
8709	struct bnx2 *bp = netdev_priv(dev);
8710	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8711	int err = 0;
8712
8713	rtnl_lock();
8714	if (pci_enable_device(pdev)) {
8715		dev_err(&pdev->dev,
8716			"Cannot re-enable PCI device after reset\n");
8717	} else {
8718		pci_set_master(pdev);
8719		pci_restore_state(pdev);
8720		pci_save_state(pdev);
8721
8722		if (netif_running(dev))
8723			err = bnx2_init_nic(bp, 1);
8724
8725		if (!err)
8726			result = PCI_ERS_RESULT_RECOVERED;
8727	}
8728
8729	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8730		bnx2_napi_enable(bp);
8731		dev_close(dev);
8732	}
8733	rtnl_unlock();
8734
8735	return result;
8736}
8737
8738/**
8739 * bnx2_io_resume - called when traffic can start flowing again.
8740 * @pdev: Pointer to PCI device
8741 *
8742 * This callback is called when the error recovery driver tells us that
8743 * it's OK to resume normal operation.
8744 */
8745static void bnx2_io_resume(struct pci_dev *pdev)
8746{
8747	struct net_device *dev = pci_get_drvdata(pdev);
8748	struct bnx2 *bp = netdev_priv(dev);
8749
8750	rtnl_lock();
8751	if (netif_running(dev))
8752		bnx2_netif_start(bp, true);
8753
8754	netif_device_attach(dev);
8755	rtnl_unlock();
8756}
8757
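/* Shutdown/reboot hook: close the interface if it is up and, when the
 * system is actually powering off, leave the chip in D3hot.
 */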
8758static void bnx2_shutdown(struct pci_dev *pdev)
8759{
8760	struct net_device *dev = pci_get_drvdata(pdev);
8761	struct bnx2 *bp;
8762
8763	if (!dev)
8764		return;
8765
8766	bp = netdev_priv(dev);
8767	if (!bp)
8768		return;
8769
8770	rtnl_lock();
8771	if (netif_running(dev))
8772		dev_close(bp->dev);
8773
8774	if (system_state == SYSTEM_POWER_OFF)
8775		bnx2_set_power_state(bp, PCI_D3hot);
8776
8777	rtnl_unlock();
8778}
8779
8780static const struct pci_error_handlers bnx2_err_handler = {
8781	.error_detected	= bnx2_io_error_detected,
8782	.slot_reset	= bnx2_io_slot_reset,
8783	.resume		= bnx2_io_resume,
8784};
8785
8786static struct pci_driver bnx2_pci_driver = {
8787	.name		= DRV_MODULE_NAME,
8788	.id_table	= bnx2_pci_tbl,
8789	.probe		= bnx2_init_one,
8790	.remove		= bnx2_remove_one,
8791	.driver.pm	= BNX2_PM_OPS,
8792	.err_handler	= &bnx2_err_handler,
8793	.shutdown	= bnx2_shutdown,
8794};
8795
8796module_pci_driver(bnx2_pci_driver);
v5.14.15
   1/* bnx2.c: QLogic bnx2 network driver.
   2 *
   3 * Copyright (c) 2004-2014 Broadcom Corporation
   4 * Copyright (c) 2014-2015 QLogic Corporation
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation.
   9 *
  10 * Written by: Michael Chan  (mchan@broadcom.com)
  11 */
  12
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14
  15#include <linux/module.h>
  16#include <linux/moduleparam.h>
  17
  18#include <linux/stringify.h>
  19#include <linux/kernel.h>
  20#include <linux/timer.h>
  21#include <linux/errno.h>
  22#include <linux/ioport.h>
  23#include <linux/slab.h>
  24#include <linux/vmalloc.h>
  25#include <linux/interrupt.h>
  26#include <linux/pci.h>
  27#include <linux/netdevice.h>
  28#include <linux/etherdevice.h>
  29#include <linux/skbuff.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/bitops.h>
  32#include <asm/io.h>
  33#include <asm/irq.h>
  34#include <linux/delay.h>
  35#include <asm/byteorder.h>
  36#include <asm/page.h>
  37#include <linux/time.h>
  38#include <linux/ethtool.h>
  39#include <linux/mii.h>
  40#include <linux/if.h>
  41#include <linux/if_vlan.h>
  42#include <net/ip.h>
  43#include <net/tcp.h>
  44#include <net/checksum.h>
  45#include <linux/workqueue.h>
  46#include <linux/crc32.h>
  47#include <linux/prefetch.h>
  48#include <linux/cache.h>
  49#include <linux/firmware.h>
  50#include <linux/log2.h>
  51#include <linux/aer.h>
  52#include <linux/crash_dump.h>
  53
  54#if IS_ENABLED(CONFIG_CNIC)
  55#define BCM_CNIC 1
  56#include "cnic_if.h"
  57#endif
  58#include "bnx2.h"
  59#include "bnx2_fw.h"
  60
  61#define DRV_MODULE_NAME		"bnx2"
  62#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.3.fw"
  63#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
  64#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1b.fw"
  65#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
  66#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
  67
  68#define RUN_AT(x) (jiffies + (x))
  69
  70/* Time in jiffies before concluding the transmitter is hung. */
  71#define TX_TIMEOUT  (5*HZ)
  72
  73MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
  74MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
  75MODULE_LICENSE("GPL");
  76MODULE_FIRMWARE(FW_MIPS_FILE_06);
  77MODULE_FIRMWARE(FW_RV2P_FILE_06);
  78MODULE_FIRMWARE(FW_MIPS_FILE_09);
  79MODULE_FIRMWARE(FW_RV2P_FILE_09);
  80MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
  81
  82static int disable_msi = 0;
  83
  84module_param(disable_msi, int, 0444);
  85MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  86
  87typedef enum {
  88	BCM5706 = 0,
  89	NC370T,
  90	NC370I,
  91	BCM5706S,
  92	NC370F,
  93	BCM5708,
  94	BCM5708S,
  95	BCM5709,
  96	BCM5709S,
  97	BCM5716,
  98	BCM5716S,
  99} board_t;
 100
 101/* indexed by board_t, above */
 102static struct {
 103	char *name;
 104} board_info[] = {
 105	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
 106	{ "HP NC370T Multifunction Gigabit Server Adapter" },
 107	{ "HP NC370i Multifunction Gigabit Server Adapter" },
 108	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
 109	{ "HP NC370F Multifunction Gigabit Server Adapter" },
 110	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
 111	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
 112	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
 113	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
 114	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
 115	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
 116	};
 117
 118static const struct pci_device_id bnx2_pci_tbl[] = {
 119	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 120	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
 121	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 122	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
 123	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
 124	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
 125	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
 126	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
 127	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
 128	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
 129	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
 130	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
 131	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
 132	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
 133	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
 134	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
 135	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
 136	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
 137	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
 138	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
 139	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
 140	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
 141	{ 0, }
 142};
 143
 144static const struct flash_spec flash_table[] =
 145{
 146#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
 147#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
 148	/* Slow EEPROM */
 149	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
 150	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 151	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 152	 "EEPROM - slow"},
 153	/* Expansion entry 0001 */
 154	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
 155	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 156	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 157	 "Entry 0001"},
 158	/* Saifun SA25F010 (non-buffered flash) */
 159	/* strap, cfg1, & write1 need updates */
 160	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
 161	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 162	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
 163	 "Non-buffered flash (128kB)"},
 164	/* Saifun SA25F020 (non-buffered flash) */
 165	/* strap, cfg1, & write1 need updates */
 166	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
 167	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 168	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
 169	 "Non-buffered flash (256kB)"},
 170	/* Expansion entry 0100 */
 171	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
 172	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 173	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 174	 "Entry 0100"},
 175	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
 176	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
 177	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 178	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 179	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
 180	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
 181	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
 182	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 183	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 184	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
 185	/* Saifun SA25F005 (non-buffered flash) */
 186	/* strap, cfg1, & write1 need updates */
 187	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
 188	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 189	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
 190	 "Non-buffered flash (64kB)"},
 191	/* Fast EEPROM */
 192	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
 193	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 194	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 195	 "EEPROM - fast"},
 196	/* Expansion entry 1001 */
 197	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
 198	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 199	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 200	 "Entry 1001"},
 201	/* Expansion entry 1010 */
 202	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
 203	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 204	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 205	 "Entry 1010"},
 206	/* ATMEL AT45DB011B (buffered flash) */
 207	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
 208	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 209	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
 210	 "Buffered flash (128kB)"},
 211	/* Expansion entry 1100 */
 212	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
 213	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 214	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 215	 "Entry 1100"},
 216	/* Expansion entry 1101 */
 217	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
 218	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 219	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 220	 "Entry 1101"},
 221	/* Ateml Expansion entry 1110 */
 222	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
 223	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 224	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
 225	 "Entry 1110 (Atmel)"},
 226	/* ATMEL AT45DB021B (buffered flash) */
 227	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
 228	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 229	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
 230	 "Buffered flash (256kB)"},
 231};
 232
 233static const struct flash_spec flash_5709 = {
 234	.flags		= BNX2_NV_BUFFERED,
 235	.page_bits	= BCM5709_FLASH_PAGE_BITS,
 236	.page_size	= BCM5709_FLASH_PAGE_SIZE,
 237	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
 238	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
 239	.name		= "5709 Buffered flash (256kB)",
 240};
 241
 242MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 243
 244static void bnx2_init_napi(struct bnx2 *bp);
 245static void bnx2_del_napi(struct bnx2 *bp);
 246
 247static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 248{
 249	u32 diff;
 250
 251	/* The ring uses 256 indices for 255 entries, one of them
 252	 * needs to be skipped.
 253	 */
 254	diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
 255	if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
 256		diff &= 0xffff;
 257		if (diff == BNX2_TX_DESC_CNT)
 258			diff = BNX2_MAX_TX_DESC_CNT;
 259	}
 260	return bp->tx_ring_size - diff;
 261}
 262
 263static u32
 264bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
 265{
 266	unsigned long flags;
 267	u32 val;
 268
 269	spin_lock_irqsave(&bp->indirect_lock, flags);
 270	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 271	val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
 272	spin_unlock_irqrestore(&bp->indirect_lock, flags);
 273	return val;
 274}
 275
 276static void
 277bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
 278{
 279	unsigned long flags;
 280
 281	spin_lock_irqsave(&bp->indirect_lock, flags);
 282	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
 283	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
 284	spin_unlock_irqrestore(&bp->indirect_lock, flags);
 285}
 286
 287static void
 288bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
 289{
 290	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
 291}
 292
 293static u32
 294bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
 295{
 296	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
 297}
 298
 299static void
 300bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 301{
 302	unsigned long flags;
 303
 304	offset += cid_addr;
 305	spin_lock_irqsave(&bp->indirect_lock, flags);
 306	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
 307		int i;
 308
 309		BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
 310		BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
 311			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
 312		for (i = 0; i < 5; i++) {
 313			val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
 314			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
 315				break;
 316			udelay(5);
 317		}
 318	} else {
 319		BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
 320		BNX2_WR(bp, BNX2_CTX_DATA, val);
 321	}
 322	spin_unlock_irqrestore(&bp->indirect_lock, flags);
 323}
 324
 325#ifdef BCM_CNIC
 326static int
 327bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
 328{
 329	struct bnx2 *bp = netdev_priv(dev);
 330	struct drv_ctl_io *io = &info->data.io;
 331
 332	switch (info->cmd) {
 333	case DRV_CTL_IO_WR_CMD:
 334		bnx2_reg_wr_ind(bp, io->offset, io->data);
 335		break;
 336	case DRV_CTL_IO_RD_CMD:
 337		io->data = bnx2_reg_rd_ind(bp, io->offset);
 338		break;
 339	case DRV_CTL_CTX_WR_CMD:
 340		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
 341		break;
 342	default:
 343		return -EINVAL;
 344	}
 345	return 0;
 346}
 347
 348static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
 349{
 350	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 351	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 352	int sb_id;
 353
 354	if (bp->flags & BNX2_FLAG_USING_MSIX) {
 355		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
 356		bnapi->cnic_present = 0;
 357		sb_id = bp->irq_nvecs;
 358		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
 359	} else {
 360		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
 361		bnapi->cnic_tag = bnapi->last_status_idx;
 362		bnapi->cnic_present = 1;
 363		sb_id = 0;
 364		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
 365	}
 366
 367	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
 368	cp->irq_arr[0].status_blk = (void *)
 369		((unsigned long) bnapi->status_blk.msi +
 370		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
 371	cp->irq_arr[0].status_blk_num = sb_id;
 372	cp->num_irq = 1;
 373}
 374
 375static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 376			      void *data)
 377{
 378	struct bnx2 *bp = netdev_priv(dev);
 379	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 380
 381	if (!ops)
 382		return -EINVAL;
 383
 384	if (cp->drv_state & CNIC_DRV_STATE_REGD)
 385		return -EBUSY;
 386
 387	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
 388		return -ENODEV;
 389
 390	bp->cnic_data = data;
 391	rcu_assign_pointer(bp->cnic_ops, ops);
 392
 393	cp->num_irq = 0;
 394	cp->drv_state = CNIC_DRV_STATE_REGD;
 395
 396	bnx2_setup_cnic_irq_info(bp);
 397
 398	return 0;
 399}
 400
 401static int bnx2_unregister_cnic(struct net_device *dev)
 402{
 403	struct bnx2 *bp = netdev_priv(dev);
 404	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 405	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 406
 407	mutex_lock(&bp->cnic_lock);
 408	cp->drv_state = 0;
 409	bnapi->cnic_present = 0;
 410	RCU_INIT_POINTER(bp->cnic_ops, NULL);
 411	mutex_unlock(&bp->cnic_lock);
 412	synchronize_rcu();
 413	return 0;
 414}
 415
 416static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
 417{
 418	struct bnx2 *bp = netdev_priv(dev);
 419	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 420
 421	if (!cp->max_iscsi_conn)
 422		return NULL;
 423
 424	cp->drv_owner = THIS_MODULE;
 425	cp->chip_id = bp->chip_id;
 426	cp->pdev = bp->pdev;
 427	cp->io_base = bp->regview;
 428	cp->drv_ctl = bnx2_drv_ctl;
 429	cp->drv_register_cnic = bnx2_register_cnic;
 430	cp->drv_unregister_cnic = bnx2_unregister_cnic;
 431
 432	return cp;
 433}
 434
 435static void
 436bnx2_cnic_stop(struct bnx2 *bp)
 437{
 438	struct cnic_ops *c_ops;
 439	struct cnic_ctl_info info;
 440
 441	mutex_lock(&bp->cnic_lock);
 442	c_ops = rcu_dereference_protected(bp->cnic_ops,
 443					  lockdep_is_held(&bp->cnic_lock));
 444	if (c_ops) {
 445		info.cmd = CNIC_CTL_STOP_CMD;
 446		c_ops->cnic_ctl(bp->cnic_data, &info);
 447	}
 448	mutex_unlock(&bp->cnic_lock);
 449}
 450
 451static void
 452bnx2_cnic_start(struct bnx2 *bp)
 453{
 454	struct cnic_ops *c_ops;
 455	struct cnic_ctl_info info;
 456
 457	mutex_lock(&bp->cnic_lock);
 458	c_ops = rcu_dereference_protected(bp->cnic_ops,
 459					  lockdep_is_held(&bp->cnic_lock));
 460	if (c_ops) {
 461		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 462			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 463
 464			bnapi->cnic_tag = bnapi->last_status_idx;
 465		}
 466		info.cmd = CNIC_CTL_START_CMD;
 467		c_ops->cnic_ctl(bp->cnic_data, &info);
 468	}
 469	mutex_unlock(&bp->cnic_lock);
 470}
 471
 472#else
 473
 474static void
 475bnx2_cnic_stop(struct bnx2 *bp)
 476{
 477}
 478
 479static void
 480bnx2_cnic_start(struct bnx2 *bp)
 481{
 482}
 483
 484#endif
 485
 486static int
 487bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 488{
 489	u32 val1;
 490	int i, ret;
 491
 492	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 493		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 494		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 495
 496		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 497		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 498
 499		udelay(40);
 500	}
 501
 502	val1 = (bp->phy_addr << 21) | (reg << 16) |
 503		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
 504		BNX2_EMAC_MDIO_COMM_START_BUSY;
 505	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 506
 507	for (i = 0; i < 50; i++) {
 508		udelay(10);
 509
 510		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 511		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 512			udelay(5);
 513
 514			val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 515			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
 516
 517			break;
 518		}
 519	}
 520
 521	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
 522		*val = 0x0;
 523		ret = -EBUSY;
 524	}
 525	else {
 526		*val = val1;
 527		ret = 0;
 528	}
 529
 530	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 531		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 532		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 533
 534		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 535		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 536
 537		udelay(40);
 538	}
 539
 540	return ret;
 541}
 542
 543static int
 544bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
 545{
 546	u32 val1;
 547	int i, ret;
 548
 549	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 550		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 551		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 552
 553		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 554		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 555
 556		udelay(40);
 557	}
 558
 559	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
 560		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
 561		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
 562	BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
 563
 564	for (i = 0; i < 50; i++) {
 565		udelay(10);
 566
 567		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
 568		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
 569			udelay(5);
 570			break;
 571		}
 572	}
 573
 574	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
 575		ret = -EBUSY;
 576	else
 577		ret = 0;
 578
 579	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
 580		val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 581		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
 582
 583		BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
 584		BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
 585
 586		udelay(40);
 587	}
 588
 589	return ret;
 590}
 591
 592static void
 593bnx2_disable_int(struct bnx2 *bp)
 594{
 595	int i;
 596	struct bnx2_napi *bnapi;
 597
 598	for (i = 0; i < bp->irq_nvecs; i++) {
 599		bnapi = &bp->bnx2_napi[i];
 600		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 601		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
 602	}
 603	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
 604}
 605
 606static void
 607bnx2_enable_int(struct bnx2 *bp)
 608{
 609	int i;
 610	struct bnx2_napi *bnapi;
 611
 612	for (i = 0; i < bp->irq_nvecs; i++) {
 613		bnapi = &bp->bnx2_napi[i];
 614
 615		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 616			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 617			BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
 618			bnapi->last_status_idx);
 619
 620		BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 621			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 622			bnapi->last_status_idx);
 623	}
 624	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 625}
 626
 627static void
 628bnx2_disable_int_sync(struct bnx2 *bp)
 629{
 630	int i;
 631
 632	atomic_inc(&bp->intr_sem);
 633	if (!netif_running(bp->dev))
 634		return;
 635
 636	bnx2_disable_int(bp);
 637	for (i = 0; i < bp->irq_nvecs; i++)
 638		synchronize_irq(bp->irq_tbl[i].vector);
 639}
 640
 641static void
 642bnx2_napi_disable(struct bnx2 *bp)
 643{
 644	int i;
 645
 646	for (i = 0; i < bp->irq_nvecs; i++)
 647		napi_disable(&bp->bnx2_napi[i].napi);
 648}
 649
 650static void
 651bnx2_napi_enable(struct bnx2 *bp)
 652{
 653	int i;
 654
 655	for (i = 0; i < bp->irq_nvecs; i++)
 656		napi_enable(&bp->bnx2_napi[i].napi);
 657}
 658
 659static void
 660bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
 661{
 662	if (stop_cnic)
 663		bnx2_cnic_stop(bp);
 664	if (netif_running(bp->dev)) {
 665		bnx2_napi_disable(bp);
 666		netif_tx_disable(bp->dev);
 667	}
 668	bnx2_disable_int_sync(bp);
 669	netif_carrier_off(bp->dev);	/* prevent tx timeout */
 670}
 671
 672static void
 673bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
 674{
 675	if (atomic_dec_and_test(&bp->intr_sem)) {
 676		if (netif_running(bp->dev)) {
 677			netif_tx_wake_all_queues(bp->dev);
 678			spin_lock_bh(&bp->phy_lock);
 679			if (bp->link_up)
 680				netif_carrier_on(bp->dev);
 681			spin_unlock_bh(&bp->phy_lock);
 682			bnx2_napi_enable(bp);
 683			bnx2_enable_int(bp);
 684			if (start_cnic)
 685				bnx2_cnic_start(bp);
 686		}
 687	}
 688}
 689
 690static void
 691bnx2_free_tx_mem(struct bnx2 *bp)
 692{
 693	int i;
 694
 695	for (i = 0; i < bp->num_tx_rings; i++) {
 696		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 697		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 698
 699		if (txr->tx_desc_ring) {
 700			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 701					  txr->tx_desc_ring,
 702					  txr->tx_desc_mapping);
 703			txr->tx_desc_ring = NULL;
 704		}
 705		kfree(txr->tx_buf_ring);
 706		txr->tx_buf_ring = NULL;
 707	}
 708}
 709
 710static void
 711bnx2_free_rx_mem(struct bnx2 *bp)
 712{
 713	int i;
 714
 715	for (i = 0; i < bp->num_rx_rings; i++) {
 716		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 717		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 718		int j;
 719
 720		for (j = 0; j < bp->rx_max_ring; j++) {
 721			if (rxr->rx_desc_ring[j])
 722				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 723						  rxr->rx_desc_ring[j],
 724						  rxr->rx_desc_mapping[j]);
 725			rxr->rx_desc_ring[j] = NULL;
 726		}
 727		vfree(rxr->rx_buf_ring);
 728		rxr->rx_buf_ring = NULL;
 729
 730		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 731			if (rxr->rx_pg_desc_ring[j])
 732				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
 733						  rxr->rx_pg_desc_ring[j],
 734						  rxr->rx_pg_desc_mapping[j]);
 735			rxr->rx_pg_desc_ring[j] = NULL;
 736		}
 737		vfree(rxr->rx_pg_ring);
 738		rxr->rx_pg_ring = NULL;
 739	}
 740}
 741
 742static int
 743bnx2_alloc_tx_mem(struct bnx2 *bp)
 744{
 745	int i;
 746
 747	for (i = 0; i < bp->num_tx_rings; i++) {
 748		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 749		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 750
 751		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
 752		if (!txr->tx_buf_ring)
 753			return -ENOMEM;
 754
 755		txr->tx_desc_ring =
 756			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
 757					   &txr->tx_desc_mapping, GFP_KERNEL);
 758		if (!txr->tx_desc_ring)
 759			return -ENOMEM;
 760	}
 761	return 0;
 762}
 763
 764static int
 765bnx2_alloc_rx_mem(struct bnx2 *bp)
 766{
 767	int i;
 768
 769	for (i = 0; i < bp->num_rx_rings; i++) {
 770		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 771		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
 772		int j;
 773
 774		rxr->rx_buf_ring =
 775			vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
 776		if (!rxr->rx_buf_ring)
 777			return -ENOMEM;
 778
 779		for (j = 0; j < bp->rx_max_ring; j++) {
 780			rxr->rx_desc_ring[j] =
 781				dma_alloc_coherent(&bp->pdev->dev,
 782						   RXBD_RING_SIZE,
 783						   &rxr->rx_desc_mapping[j],
 784						   GFP_KERNEL);
 785			if (!rxr->rx_desc_ring[j])
 786				return -ENOMEM;
 787
 788		}
 789
 790		if (bp->rx_pg_ring_size) {
 791			rxr->rx_pg_ring =
 792				vzalloc(array_size(SW_RXPG_RING_SIZE,
 793						   bp->rx_max_pg_ring));
 794			if (!rxr->rx_pg_ring)
 795				return -ENOMEM;
 796
 797		}
 798
 799		for (j = 0; j < bp->rx_max_pg_ring; j++) {
 800			rxr->rx_pg_desc_ring[j] =
 801				dma_alloc_coherent(&bp->pdev->dev,
 802						   RXBD_RING_SIZE,
 803						   &rxr->rx_pg_desc_mapping[j],
 804						   GFP_KERNEL);
 805			if (!rxr->rx_pg_desc_ring[j])
 806				return -ENOMEM;
 807
 808		}
 809	}
 810	return 0;
 811}
 812
 813static void
 814bnx2_free_stats_blk(struct net_device *dev)
 815{
 816	struct bnx2 *bp = netdev_priv(dev);
 817
 818	if (bp->status_blk) {
 819		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
 820				  bp->status_blk,
 821				  bp->status_blk_mapping);
 822		bp->status_blk = NULL;
 823		bp->stats_blk = NULL;
 824	}
 825}
 826
 827static int
 828bnx2_alloc_stats_blk(struct net_device *dev)
 829{
 830	int status_blk_size;
 831	void *status_blk;
 832	struct bnx2 *bp = netdev_priv(dev);
 833
 834	/* Combine status and statistics blocks into one allocation. */
 835	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
 836	if (bp->flags & BNX2_FLAG_MSIX_CAP)
 837		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
 838						 BNX2_SBLK_MSIX_ALIGN_SIZE);
 839	bp->status_stats_size = status_blk_size +
 840				sizeof(struct statistics_block);
 841	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
 842					&bp->status_blk_mapping, GFP_KERNEL);
 843	if (!status_blk)
 844		return -ENOMEM;
 845
 846	bp->status_blk = status_blk;
 847	bp->stats_blk = status_blk + status_blk_size;
 848	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
 849
 850	return 0;
 851}
 852
 853static void
 854bnx2_free_mem(struct bnx2 *bp)
 855{
 856	int i;
 857	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 858
 859	bnx2_free_tx_mem(bp);
 860	bnx2_free_rx_mem(bp);
 861
 862	for (i = 0; i < bp->ctx_pages; i++) {
 863		if (bp->ctx_blk[i]) {
 864			dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
 865					  bp->ctx_blk[i],
 866					  bp->ctx_blk_mapping[i]);
 867			bp->ctx_blk[i] = NULL;
 868		}
 869	}
 870
 871	if (bnapi->status_blk.msi)
 872		bnapi->status_blk.msi = NULL;
 873}
 874
 875static int
 876bnx2_alloc_mem(struct bnx2 *bp)
 877{
 878	int i, err;
 879	struct bnx2_napi *bnapi;
 880
 881	bnapi = &bp->bnx2_napi[0];
 882	bnapi->status_blk.msi = bp->status_blk;
 883	bnapi->hw_tx_cons_ptr =
 884		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
 885	bnapi->hw_rx_cons_ptr =
 886		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
 887	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
 888		for (i = 1; i < bp->irq_nvecs; i++) {
 889			struct status_block_msix *sblk;
 890
 891			bnapi = &bp->bnx2_napi[i];
 892
 893			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
 894			bnapi->status_blk.msix = sblk;
 895			bnapi->hw_tx_cons_ptr =
 896				&sblk->status_tx_quick_consumer_index;
 897			bnapi->hw_rx_cons_ptr =
 898				&sblk->status_rx_quick_consumer_index;
 899			bnapi->int_num = i << 24;
 900		}
 901	}
 902
 903	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
 904		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
 905		if (bp->ctx_pages == 0)
 906			bp->ctx_pages = 1;
 907		for (i = 0; i < bp->ctx_pages; i++) {
 908			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
 909						BNX2_PAGE_SIZE,
 910						&bp->ctx_blk_mapping[i],
 911						GFP_KERNEL);
 912			if (!bp->ctx_blk[i])
 913				goto alloc_mem_err;
 914		}
 915	}
 916
 917	err = bnx2_alloc_rx_mem(bp);
 918	if (err)
 919		goto alloc_mem_err;
 920
 921	err = bnx2_alloc_tx_mem(bp);
 922	if (err)
 923		goto alloc_mem_err;
 924
 925	return 0;
 926
 927alloc_mem_err:
 928	bnx2_free_mem(bp);
 929	return -ENOMEM;
 930}
 931
 932static void
 933bnx2_report_fw_link(struct bnx2 *bp)
 934{
 935	u32 fw_link_status = 0;
 936
 937	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
 938		return;
 939
 940	if (bp->link_up) {
 941		u32 bmsr;
 942
 943		switch (bp->line_speed) {
 944		case SPEED_10:
 945			if (bp->duplex == DUPLEX_HALF)
 946				fw_link_status = BNX2_LINK_STATUS_10HALF;
 947			else
 948				fw_link_status = BNX2_LINK_STATUS_10FULL;
 949			break;
 950		case SPEED_100:
 951			if (bp->duplex == DUPLEX_HALF)
 952				fw_link_status = BNX2_LINK_STATUS_100HALF;
 953			else
 954				fw_link_status = BNX2_LINK_STATUS_100FULL;
 955			break;
 956		case SPEED_1000:
 957			if (bp->duplex == DUPLEX_HALF)
 958				fw_link_status = BNX2_LINK_STATUS_1000HALF;
 959			else
 960				fw_link_status = BNX2_LINK_STATUS_1000FULL;
 961			break;
 962		case SPEED_2500:
 963			if (bp->duplex == DUPLEX_HALF)
 964				fw_link_status = BNX2_LINK_STATUS_2500HALF;
 965			else
 966				fw_link_status = BNX2_LINK_STATUS_2500FULL;
 967			break;
 968		}
 969
 970		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
 971
 972		if (bp->autoneg) {
 973			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
 974
 975			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 976			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
 977
 978			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
 979			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
 980				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
 981			else
 982				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
 983		}
 984	}
 985	else
 986		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
 987
 988	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
 989}
 990
 991static char *
 992bnx2_xceiver_str(struct bnx2 *bp)
 993{
 994	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
 995		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
 996		 "Copper");
 997}
 998
 999static void
1000bnx2_report_link(struct bnx2 *bp)
1001{
1002	if (bp->link_up) {
1003		netif_carrier_on(bp->dev);
1004		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1005			    bnx2_xceiver_str(bp),
1006			    bp->line_speed,
1007			    bp->duplex == DUPLEX_FULL ? "full" : "half");
1008
1009		if (bp->flow_ctrl) {
1010			if (bp->flow_ctrl & FLOW_CTRL_RX) {
1011				pr_cont(", receive ");
1012				if (bp->flow_ctrl & FLOW_CTRL_TX)
1013					pr_cont("& transmit ");
1014			}
1015			else {
1016				pr_cont(", transmit ");
1017			}
1018			pr_cont("flow control ON");
1019		}
1020		pr_cont("\n");
1021	} else {
1022		netif_carrier_off(bp->dev);
1023		netdev_err(bp->dev, "NIC %s Link is Down\n",
1024			   bnx2_xceiver_str(bp));
1025	}
1026
1027	bnx2_report_fw_link(bp);
1028}
1029
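/* Resolve TX/RX pause for the current link.  Unless both speed and flow
 * control were autonegotiated, the forced req_flow_ctrl value is used
 * (full duplex only); otherwise the local and remote pause
 * advertisements are combined per IEEE 802.3, Annex 28B.
 */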
1030static void
1031bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1032{
1033	u32 local_adv, remote_adv;
1034
1035	bp->flow_ctrl = 0;
1036	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1037		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1038
1039		if (bp->duplex == DUPLEX_FULL) {
1040			bp->flow_ctrl = bp->req_flow_ctrl;
1041		}
1042		return;
1043	}
1044
1045	if (bp->duplex != DUPLEX_FULL) {
1046		return;
1047	}
1048
1049	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1050	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1051		u32 val;
1052
1053		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1054		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1055			bp->flow_ctrl |= FLOW_CTRL_TX;
1056		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1057			bp->flow_ctrl |= FLOW_CTRL_RX;
1058		return;
1059	}
1060
1061	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1062	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1063
1064	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1065		u32 new_local_adv = 0;
1066		u32 new_remote_adv = 0;
1067
1068		if (local_adv & ADVERTISE_1000XPAUSE)
1069			new_local_adv |= ADVERTISE_PAUSE_CAP;
1070		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1071			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1072		if (remote_adv & ADVERTISE_1000XPAUSE)
1073			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1074		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1075			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1076
1077		local_adv = new_local_adv;
1078		remote_adv = new_remote_adv;
1079	}
1080
1081	/* See Table 28B-3 of 802.3ab-1999 spec. */
1082	if (local_adv & ADVERTISE_PAUSE_CAP) {
1083		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1084	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1085				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1086			}
1087			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1088				bp->flow_ctrl = FLOW_CTRL_RX;
1089			}
1090		}
1091		else {
1092			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1093				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1094			}
1095		}
1096	}
1097	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1098		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1099			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1100
1101			bp->flow_ctrl = FLOW_CTRL_TX;
1102		}
1103	}
1104}
1105
1106static int
1107bnx2_5709s_linkup(struct bnx2 *bp)
1108{
1109	u32 val, speed;
1110
1111	bp->link_up = 1;
1112
1113	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1114	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1115	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1116
1117	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1118		bp->line_speed = bp->req_line_speed;
1119		bp->duplex = bp->req_duplex;
1120		return 0;
1121	}
1122	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1123	switch (speed) {
1124		case MII_BNX2_GP_TOP_AN_SPEED_10:
1125			bp->line_speed = SPEED_10;
1126			break;
1127		case MII_BNX2_GP_TOP_AN_SPEED_100:
1128			bp->line_speed = SPEED_100;
1129			break;
1130		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1131		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1132			bp->line_speed = SPEED_1000;
1133			break;
1134		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1135			bp->line_speed = SPEED_2500;
1136			break;
1137	}
1138	if (val & MII_BNX2_GP_TOP_AN_FD)
1139		bp->duplex = DUPLEX_FULL;
1140	else
1141		bp->duplex = DUPLEX_HALF;
1142	return 0;
1143}
1144
1145static int
1146bnx2_5708s_linkup(struct bnx2 *bp)
1147{
1148	u32 val;
1149
1150	bp->link_up = 1;
1151	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1152	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1153		case BCM5708S_1000X_STAT1_SPEED_10:
1154			bp->line_speed = SPEED_10;
1155			break;
1156		case BCM5708S_1000X_STAT1_SPEED_100:
1157			bp->line_speed = SPEED_100;
1158			break;
1159		case BCM5708S_1000X_STAT1_SPEED_1G:
1160			bp->line_speed = SPEED_1000;
1161			break;
1162		case BCM5708S_1000X_STAT1_SPEED_2G5:
1163			bp->line_speed = SPEED_2500;
1164			break;
1165	}
1166	if (val & BCM5708S_1000X_STAT1_FD)
1167		bp->duplex = DUPLEX_FULL;
1168	else
1169		bp->duplex = DUPLEX_HALF;
1170
1171	return 0;
1172}
1173
1174static int
1175bnx2_5706s_linkup(struct bnx2 *bp)
1176{
1177	u32 bmcr, local_adv, remote_adv, common;
1178
1179	bp->link_up = 1;
1180	bp->line_speed = SPEED_1000;
1181
1182	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183	if (bmcr & BMCR_FULLDPLX) {
1184		bp->duplex = DUPLEX_FULL;
1185	}
1186	else {
1187		bp->duplex = DUPLEX_HALF;
1188	}
1189
1190	if (!(bmcr & BMCR_ANENABLE)) {
1191		return 0;
1192	}
1193
1194	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1195	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1196
1197	common = local_adv & remote_adv;
1198	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1199
1200		if (common & ADVERTISE_1000XFULL) {
1201			bp->duplex = DUPLEX_FULL;
1202		}
1203		else {
1204			bp->duplex = DUPLEX_HALF;
1205		}
1206	}
1207
1208	return 0;
1209}
1210
1211static int
1212bnx2_copper_linkup(struct bnx2 *bp)
1213{
1214	u32 bmcr;
1215
1216	bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1217
1218	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1219	if (bmcr & BMCR_ANENABLE) {
1220		u32 local_adv, remote_adv, common;
1221
1222		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1223		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1224
1225		common = local_adv & (remote_adv >> 2);
1226		if (common & ADVERTISE_1000FULL) {
1227			bp->line_speed = SPEED_1000;
1228			bp->duplex = DUPLEX_FULL;
1229		}
1230		else if (common & ADVERTISE_1000HALF) {
1231			bp->line_speed = SPEED_1000;
1232			bp->duplex = DUPLEX_HALF;
1233		}
1234		else {
1235			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1236			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1237
1238			common = local_adv & remote_adv;
1239			if (common & ADVERTISE_100FULL) {
1240				bp->line_speed = SPEED_100;
1241				bp->duplex = DUPLEX_FULL;
1242			}
1243			else if (common & ADVERTISE_100HALF) {
1244				bp->line_speed = SPEED_100;
1245				bp->duplex = DUPLEX_HALF;
1246			}
1247			else if (common & ADVERTISE_10FULL) {
1248				bp->line_speed = SPEED_10;
1249				bp->duplex = DUPLEX_FULL;
1250			}
1251			else if (common & ADVERTISE_10HALF) {
1252				bp->line_speed = SPEED_10;
1253				bp->duplex = DUPLEX_HALF;
1254			}
1255			else {
1256				bp->line_speed = 0;
1257				bp->link_up = 0;
1258			}
1259		}
1260	}
1261	else {
1262		if (bmcr & BMCR_SPEED100) {
1263			bp->line_speed = SPEED_100;
1264		}
1265		else {
1266			bp->line_speed = SPEED_10;
1267		}
1268		if (bmcr & BMCR_FULLDPLX) {
1269			bp->duplex = DUPLEX_FULL;
1270		}
1271		else {
1272			bp->duplex = DUPLEX_HALF;
1273		}
1274	}
1275
1276	if (bp->link_up) {
1277		u32 ext_status;
1278
1279		bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1280		if (ext_status & EXT_STATUS_MDIX)
1281			bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1282	}
1283
1284	return 0;
1285}
1286
1287static void
1288bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1289{
1290	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1291
1292	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1293	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1294	val |= 0x02 << 8;
1295
1296	if (bp->flow_ctrl & FLOW_CTRL_TX)
1297		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
1298
1299	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1300}
1301
1302static void
1303bnx2_init_all_rx_contexts(struct bnx2 *bp)
1304{
1305	int i;
1306	u32 cid;
1307
1308	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1309		if (i == 1)
1310			cid = RX_RSS_CID;
1311		bnx2_init_rx_context(bp, cid);
1312	}
1313}
1314
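/* Program the EMAC to match the resolved PHY state: port mode (MII,
 * GMII, 2.5G), duplex, and the RX/TX PAUSE enables, then refresh the
 * RX contexts so the flow-control setting takes effect there too.
 */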
1315static void
1316bnx2_set_mac_link(struct bnx2 *bp)
1317{
1318	u32 val;
1319
1320	BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1321	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1322		(bp->duplex == DUPLEX_HALF)) {
1323		BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1324	}
1325
1326	/* Configure the EMAC mode register. */
1327	val = BNX2_RD(bp, BNX2_EMAC_MODE);
1328
1329	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1330		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1331		BNX2_EMAC_MODE_25G_MODE);
1332
1333	if (bp->link_up) {
1334		switch (bp->line_speed) {
1335			case SPEED_10:
1336				if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1337					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1338					break;
1339				}
1340				fallthrough;
1341			case SPEED_100:
1342				val |= BNX2_EMAC_MODE_PORT_MII;
1343				break;
1344			case SPEED_2500:
1345				val |= BNX2_EMAC_MODE_25G_MODE;
1346				fallthrough;
1347			case SPEED_1000:
1348				val |= BNX2_EMAC_MODE_PORT_GMII;
1349				break;
1350		}
1351	}
1352	else {
1353		val |= BNX2_EMAC_MODE_PORT_GMII;
1354	}
1355
1356	/* Set the MAC to operate in the appropriate duplex mode. */
1357	if (bp->duplex == DUPLEX_HALF)
1358		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1359	BNX2_WR(bp, BNX2_EMAC_MODE, val);
1360
1361	/* Enable/disable rx PAUSE. */
1362	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1363
1364	if (bp->flow_ctrl & FLOW_CTRL_RX)
1365		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1366	BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1367
1368	/* Enable/disable tx PAUSE. */
1369	val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1370	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1371
1372	if (bp->flow_ctrl & FLOW_CTRL_TX)
1373		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1374	BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1375
1376	/* Acknowledge the interrupt. */
1377	BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1378
1379	bnx2_init_all_rx_contexts(bp);
1380}
1381
1382static void
1383bnx2_enable_bmsr1(struct bnx2 *bp)
1384{
1385	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1387		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388			       MII_BNX2_BLK_ADDR_GP_STATUS);
1389}
1390
1391static void
1392bnx2_disable_bmsr1(struct bnx2 *bp)
1393{
1394	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1395	    (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1396		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1397			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1398}
1399
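/* Advertise 2.5G in the UP1 register (selected via the OVER1G block on
 * the 5709).  Returns zero if the advertisement had to be changed (or
 * the PHY is not 2.5G capable), so callers know a renegotiation is
 * needed; nonzero if 2.5G was already advertised.
 */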
1400static int
1401bnx2_test_and_enable_2g5(struct bnx2 *bp)
1402{
1403	u32 up1;
1404	int ret = 1;
1405
1406	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1407		return 0;
1408
1409	if (bp->autoneg & AUTONEG_SPEED)
1410		bp->advertising |= ADVERTISED_2500baseX_Full;
1411
1412	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1413		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1414
1415	bnx2_read_phy(bp, bp->mii_up1, &up1);
1416	if (!(up1 & BCM5708S_UP1_2G5)) {
1417		up1 |= BCM5708S_UP1_2G5;
1418		bnx2_write_phy(bp, bp->mii_up1, up1);
1419		ret = 0;
1420	}
1421
1422	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1423		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1424			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1425
1426	return ret;
1427}
1428
1429static int
1430bnx2_test_and_disable_2g5(struct bnx2 *bp)
1431{
1432	u32 up1;
1433	int ret = 0;
1434
1435	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1436		return 0;
1437
1438	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1439		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1440
1441	bnx2_read_phy(bp, bp->mii_up1, &up1);
1442	if (up1 & BCM5708S_UP1_2G5) {
1443		up1 &= ~BCM5708S_UP1_2G5;
1444		bnx2_write_phy(bp, bp->mii_up1, up1);
1445		ret = 1;
1446	}
1447
1448	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1449		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1450			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1451
1452	return ret;
1453}
1454
1455static void
1456bnx2_enable_forced_2g5(struct bnx2 *bp)
1457{
1458	u32 bmcr;
1459	int err;
1460
1461	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1462		return;
1463
1464	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1465		u32 val;
1466
1467		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1469		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1470			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1471			val |= MII_BNX2_SD_MISC1_FORCE |
1472				MII_BNX2_SD_MISC1_FORCE_2_5G;
1473			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1474		}
1475
1476		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1477			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1478		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479
1480	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1481		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1482		if (!err)
1483			bmcr |= BCM5708S_BMCR_FORCE_2500;
1484	} else {
1485		return;
1486	}
1487
1488	if (err)
1489		return;
1490
1491	if (bp->autoneg & AUTONEG_SPEED) {
1492		bmcr &= ~BMCR_ANENABLE;
1493		if (bp->req_duplex == DUPLEX_FULL)
1494			bmcr |= BMCR_FULLDPLX;
1495	}
1496	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1497}
1498
1499static void
1500bnx2_disable_forced_2g5(struct bnx2 *bp)
1501{
1502	u32 bmcr;
1503	int err;
1504
1505	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1506		return;
1507
1508	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1509		u32 val;
1510
1511		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1512			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1513		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1514			val &= ~MII_BNX2_SD_MISC1_FORCE;
1515			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1516		}
1517
1518		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1519			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1520		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1521
1522	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1523		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1524		if (!err)
1525			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1526	} else {
1527		return;
1528	}
1529
1530	if (err)
1531		return;
1532
1533	if (bp->autoneg & AUTONEG_SPEED)
1534		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1535	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1536}
1537
1538static void
1539bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1540{
1541	u32 val;
1542
1543	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1544	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1545	if (start)
1546		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1547	else
1548		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1549}
1550
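/* Poll the PHY and bring the driver's link state in sync with it: read
 * the latched BMSR twice for the current state, let the per-chip
 * handler fill in speed and duplex, then resolve flow control, report
 * any transition, and reprogram the MAC.
 */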
1551static int
1552bnx2_set_link(struct bnx2 *bp)
1553{
1554	u32 bmsr;
1555	u8 link_up;
1556
1557	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1558		bp->link_up = 1;
1559		return 0;
1560	}
1561
1562	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1563		return 0;
1564
1565	link_up = bp->link_up;
1566
1567	bnx2_enable_bmsr1(bp);
1568	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1569	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1570	bnx2_disable_bmsr1(bp);
1571
1572	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1573	    (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1574		u32 val, an_dbg;
1575
1576		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1577			bnx2_5706s_force_link_dn(bp, 0);
1578			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1579		}
1580		val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1581
1582		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1583		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1584		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1585
1586		if ((val & BNX2_EMAC_STATUS_LINK) &&
1587		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1588			bmsr |= BMSR_LSTATUS;
1589		else
1590			bmsr &= ~BMSR_LSTATUS;
1591	}
1592
1593	if (bmsr & BMSR_LSTATUS) {
1594		bp->link_up = 1;
1595
1596		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1597			if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1598				bnx2_5706s_linkup(bp);
1599			else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1600				bnx2_5708s_linkup(bp);
1601			else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1602				bnx2_5709s_linkup(bp);
1603		}
1604		else {
1605			bnx2_copper_linkup(bp);
1606		}
1607		bnx2_resolve_flow_ctrl(bp);
1608	}
1609	else {
1610		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1611		    (bp->autoneg & AUTONEG_SPEED))
1612			bnx2_disable_forced_2g5(bp);
1613
1614		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1615			u32 bmcr;
1616
1617			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1618			bmcr |= BMCR_ANENABLE;
1619			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1620
1621			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1622		}
1623		bp->link_up = 0;
1624	}
1625
1626	if (bp->link_up != link_up) {
1627		bnx2_report_link(bp);
1628	}
1629
1630	bnx2_set_mac_link(bp);
1631
1632	return 0;
1633}
1634
1635static int
1636bnx2_reset_phy(struct bnx2 *bp)
1637{
1638	int i;
1639	u32 reg;
1640
 1641	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1642
1643#define PHY_RESET_MAX_WAIT 100
1644	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1645		udelay(10);
1646
1647		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1648		if (!(reg & BMCR_RESET)) {
1649			udelay(20);
1650			break;
1651		}
1652	}
1653	if (i == PHY_RESET_MAX_WAIT) {
1654		return -EBUSY;
1655	}
1656	return 0;
1657}
1658
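/* Translate the requested flow-control mode into MII pause
 * advertisement bits, using the 1000BASE-X encodings for SerDes and
 * the copper encodings otherwise.  RX-only pause has to advertise
 * both the symmetric and asymmetric bits.
 */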
1659static u32
1660bnx2_phy_get_pause_adv(struct bnx2 *bp)
1661{
1662	u32 adv = 0;
1663
1664	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1665		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1666
1667		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1668			adv = ADVERTISE_1000XPAUSE;
1669		}
1670		else {
1671			adv = ADVERTISE_PAUSE_CAP;
1672		}
1673	}
1674	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1675		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1676			adv = ADVERTISE_1000XPSE_ASYM;
1677		}
1678		else {
1679			adv = ADVERTISE_PAUSE_ASYM;
1680		}
1681	}
1682	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1683		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1684			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1685		}
1686		else {
1687			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1688		}
1689	}
1690	return adv;
1691}
1692
1693static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1694
1695static int
1696bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1697__releases(&bp->phy_lock)
1698__acquires(&bp->phy_lock)
1699{
1700	u32 speed_arg = 0, pause_adv;
1701
1702	pause_adv = bnx2_phy_get_pause_adv(bp);
1703
1704	if (bp->autoneg & AUTONEG_SPEED) {
1705		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1706		if (bp->advertising & ADVERTISED_10baseT_Half)
1707			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1708		if (bp->advertising & ADVERTISED_10baseT_Full)
1709			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1710		if (bp->advertising & ADVERTISED_100baseT_Half)
1711			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1712		if (bp->advertising & ADVERTISED_100baseT_Full)
1713			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1714		if (bp->advertising & ADVERTISED_1000baseT_Full)
1715			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1716		if (bp->advertising & ADVERTISED_2500baseX_Full)
1717			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1718	} else {
1719		if (bp->req_line_speed == SPEED_2500)
1720			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1721		else if (bp->req_line_speed == SPEED_1000)
1722			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1723		else if (bp->req_line_speed == SPEED_100) {
1724			if (bp->req_duplex == DUPLEX_FULL)
1725				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1726			else
1727				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1728		} else if (bp->req_line_speed == SPEED_10) {
1729			if (bp->req_duplex == DUPLEX_FULL)
1730				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1731			else
1732				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1733		}
1734	}
1735
1736	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1737		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1738	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1739		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1740
1741	if (port == PORT_TP)
1742		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1743			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1744
1745	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1746
1747	spin_unlock_bh(&bp->phy_lock);
1748	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1749	spin_lock_bh(&bp->phy_lock);
1750
1751	return 0;
1752}
1753
1754static int
1755bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1756__releases(&bp->phy_lock)
1757__acquires(&bp->phy_lock)
1758{
1759	u32 adv, bmcr;
1760	u32 new_adv = 0;
1761
1762	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1763		return bnx2_setup_remote_phy(bp, port);
1764
1765	if (!(bp->autoneg & AUTONEG_SPEED)) {
1766		u32 new_bmcr;
1767		int force_link_down = 0;
1768
1769		if (bp->req_line_speed == SPEED_2500) {
1770			if (!bnx2_test_and_enable_2g5(bp))
1771				force_link_down = 1;
1772		} else if (bp->req_line_speed == SPEED_1000) {
1773			if (bnx2_test_and_disable_2g5(bp))
1774				force_link_down = 1;
1775		}
1776		bnx2_read_phy(bp, bp->mii_adv, &adv);
1777		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1778
1779		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1780		new_bmcr = bmcr & ~BMCR_ANENABLE;
1781		new_bmcr |= BMCR_SPEED1000;
1782
1783		if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1784			if (bp->req_line_speed == SPEED_2500)
1785				bnx2_enable_forced_2g5(bp);
1786			else if (bp->req_line_speed == SPEED_1000) {
1787				bnx2_disable_forced_2g5(bp);
1788				new_bmcr &= ~0x2000;
1789			}
1790
1791		} else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1792			if (bp->req_line_speed == SPEED_2500)
1793				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1794			else
1795				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1796		}
1797
1798		if (bp->req_duplex == DUPLEX_FULL) {
1799			adv |= ADVERTISE_1000XFULL;
1800			new_bmcr |= BMCR_FULLDPLX;
1801		}
1802		else {
1803			adv |= ADVERTISE_1000XHALF;
1804			new_bmcr &= ~BMCR_FULLDPLX;
1805		}
1806		if ((new_bmcr != bmcr) || (force_link_down)) {
 1807			/* Force a link down that is visible on the other side */
1808			if (bp->link_up) {
1809				bnx2_write_phy(bp, bp->mii_adv, adv &
1810					       ~(ADVERTISE_1000XFULL |
1811						 ADVERTISE_1000XHALF));
1812				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1813					BMCR_ANRESTART | BMCR_ANENABLE);
1814
1815				bp->link_up = 0;
1816				netif_carrier_off(bp->dev);
1817				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1818				bnx2_report_link(bp);
1819			}
1820			bnx2_write_phy(bp, bp->mii_adv, adv);
1821			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1822		} else {
1823			bnx2_resolve_flow_ctrl(bp);
1824			bnx2_set_mac_link(bp);
1825		}
1826		return 0;
1827	}
1828
1829	bnx2_test_and_enable_2g5(bp);
1830
1831	if (bp->advertising & ADVERTISED_1000baseT_Full)
1832		new_adv |= ADVERTISE_1000XFULL;
1833
1834	new_adv |= bnx2_phy_get_pause_adv(bp);
1835
1836	bnx2_read_phy(bp, bp->mii_adv, &adv);
1837	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1838
1839	bp->serdes_an_pending = 0;
1840	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
 1841		/* Force a link down that is visible on the other side */
1842		if (bp->link_up) {
1843			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1844			spin_unlock_bh(&bp->phy_lock);
1845			msleep(20);
1846			spin_lock_bh(&bp->phy_lock);
1847		}
1848
1849		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1850		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1851			BMCR_ANENABLE);
 1852		/* Speed up link-up time when the link partner
 1853		 * does not autonegotiate, which is very common
 1854		 * in blade servers. Some blade servers use
 1855		 * IPMI for keyboard input and it's important
 1856		 * to minimize link disruptions. Autonegotiation involves
 1857		 * exchanging base pages plus 3 next pages and
 1858		 * normally completes in about 120 msec.
 1859		 */
1860		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1861		bp->serdes_an_pending = 1;
1862		mod_timer(&bp->timer, jiffies + bp->current_interval);
1863	} else {
1864		bnx2_resolve_flow_ctrl(bp);
1865		bnx2_set_mac_link(bp);
1866	}
1867
1868	return 0;
1869}
1870
1871#define ETHTOOL_ALL_FIBRE_SPEED						\
1872	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1873		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1874		(ADVERTISED_1000baseT_Full)
1875
1876#define ETHTOOL_ALL_COPPER_SPEED					\
1877	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1878	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1879	ADVERTISED_1000baseT_Full)
1880
1881#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1882	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1883
1884#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1885
1886static void
1887bnx2_set_default_remote_link(struct bnx2 *bp)
1888{
1889	u32 link;
1890
1891	if (bp->phy_port == PORT_TP)
1892		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893	else
1894		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1895
1896	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1897		bp->req_line_speed = 0;
1898		bp->autoneg |= AUTONEG_SPEED;
1899		bp->advertising = ADVERTISED_Autoneg;
1900		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1901			bp->advertising |= ADVERTISED_10baseT_Half;
1902		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1903			bp->advertising |= ADVERTISED_10baseT_Full;
1904		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1905			bp->advertising |= ADVERTISED_100baseT_Half;
1906		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1907			bp->advertising |= ADVERTISED_100baseT_Full;
1908		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1909			bp->advertising |= ADVERTISED_1000baseT_Full;
1910		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1911			bp->advertising |= ADVERTISED_2500baseX_Full;
1912	} else {
1913		bp->autoneg = 0;
1914		bp->advertising = 0;
1915		bp->req_duplex = DUPLEX_FULL;
1916		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1917			bp->req_line_speed = SPEED_10;
1918			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1919				bp->req_duplex = DUPLEX_HALF;
1920		}
1921		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1922			bp->req_line_speed = SPEED_100;
1923			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1924				bp->req_duplex = DUPLEX_HALF;
1925		}
1926		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1927			bp->req_line_speed = SPEED_1000;
1928		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1929			bp->req_line_speed = SPEED_2500;
1930	}
1931}
1932
1933static void
1934bnx2_set_default_link(struct bnx2 *bp)
1935{
1936	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1937		bnx2_set_default_remote_link(bp);
1938		return;
1939	}
1940
1941	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1942	bp->req_line_speed = 0;
1943	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1944		u32 reg;
1945
1946		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947
1948		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1949		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1950		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1951			bp->autoneg = 0;
1952			bp->req_line_speed = bp->line_speed = SPEED_1000;
1953			bp->req_duplex = DUPLEX_FULL;
1954		}
1955	} else
1956		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1957}
1958
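/* Advance the driver pulse sequence number in shared memory so the
 * bootcode knows the driver is still alive.  The write goes through
 * the PCICFG register window, which is shared with other indirect
 * accesses and therefore guarded by indirect_lock.
 */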
1959static void
1960bnx2_send_heart_beat(struct bnx2 *bp)
1961{
1962	u32 msg;
1963	u32 addr;
1964
1965	spin_lock(&bp->indirect_lock);
1966	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1967	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1968	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1969	BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1970	spin_unlock(&bp->indirect_lock);
1971}
1972
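/* Decode the link status word posted by the management firmware when
 * the PHY is remotely controlled: recover speed and duplex (each
 * half-duplex case falls through to its full-duplex counterpart to
 * set the speed), flow control, and the active port type.
 */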
1973static void
1974bnx2_remote_phy_event(struct bnx2 *bp)
1975{
1976	u32 msg;
1977	u8 link_up = bp->link_up;
1978	u8 old_port;
1979
1980	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1981
1982	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1983		bnx2_send_heart_beat(bp);
1984
1985	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1986
1987	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1988		bp->link_up = 0;
1989	else {
1990		u32 speed;
1991
1992		bp->link_up = 1;
1993		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1994		bp->duplex = DUPLEX_FULL;
1995		switch (speed) {
1996			case BNX2_LINK_STATUS_10HALF:
1997				bp->duplex = DUPLEX_HALF;
1998				fallthrough;
1999			case BNX2_LINK_STATUS_10FULL:
2000				bp->line_speed = SPEED_10;
2001				break;
2002			case BNX2_LINK_STATUS_100HALF:
2003				bp->duplex = DUPLEX_HALF;
2004				fallthrough;
2005			case BNX2_LINK_STATUS_100BASE_T4:
2006			case BNX2_LINK_STATUS_100FULL:
2007				bp->line_speed = SPEED_100;
2008				break;
2009			case BNX2_LINK_STATUS_1000HALF:
2010				bp->duplex = DUPLEX_HALF;
2011				fallthrough;
2012			case BNX2_LINK_STATUS_1000FULL:
2013				bp->line_speed = SPEED_1000;
2014				break;
2015			case BNX2_LINK_STATUS_2500HALF:
2016				bp->duplex = DUPLEX_HALF;
2017				fallthrough;
2018			case BNX2_LINK_STATUS_2500FULL:
2019				bp->line_speed = SPEED_2500;
2020				break;
2021			default:
2022				bp->line_speed = 0;
2023				break;
2024		}
2025
2026		bp->flow_ctrl = 0;
2027		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2028		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2029			if (bp->duplex == DUPLEX_FULL)
2030				bp->flow_ctrl = bp->req_flow_ctrl;
2031		} else {
2032			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2033				bp->flow_ctrl |= FLOW_CTRL_TX;
2034			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2035				bp->flow_ctrl |= FLOW_CTRL_RX;
2036		}
2037
2038		old_port = bp->phy_port;
2039		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2040			bp->phy_port = PORT_FIBRE;
2041		else
2042			bp->phy_port = PORT_TP;
2043
2044		if (old_port != bp->phy_port)
2045			bnx2_set_default_link(bp);
2046
2047	}
2048	if (bp->link_up != link_up)
2049		bnx2_report_link(bp);
2050
2051	bnx2_set_mac_link(bp);
2052}
2053
2054static int
2055bnx2_set_remote_link(struct bnx2 *bp)
2056{
2057	u32 evt_code;
2058
2059	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2060	switch (evt_code) {
2061		case BNX2_FW_EVT_CODE_LINK_EVENT:
2062			bnx2_remote_phy_event(bp);
2063			break;
2064		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2065		default:
2066			bnx2_send_heart_beat(bp);
2067			break;
2068	}
2069	return 0;
2070}
2071
2072static int
2073bnx2_setup_copper_phy(struct bnx2 *bp)
2074__releases(&bp->phy_lock)
2075__acquires(&bp->phy_lock)
2076{
2077	u32 bmcr, adv_reg, new_adv = 0;
2078	u32 new_bmcr;
2079
2080	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2081
2082	bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2083	adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2084		    ADVERTISE_PAUSE_ASYM);
2085
2086	new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2087
2088	if (bp->autoneg & AUTONEG_SPEED) {
2089		u32 adv1000_reg;
2090		u32 new_adv1000 = 0;
2091
2092		new_adv |= bnx2_phy_get_pause_adv(bp);
2093
2094		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2095		adv1000_reg &= PHY_ALL_1000_SPEED;
2096
2097		new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2098		if ((adv1000_reg != new_adv1000) ||
2099			(adv_reg != new_adv) ||
2100			((bmcr & BMCR_ANENABLE) == 0)) {
2101
2102			bnx2_write_phy(bp, bp->mii_adv, new_adv);
2103			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2104			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2105				BMCR_ANENABLE);
2106		}
2107		else if (bp->link_up) {
2108			/* Flow ctrl may have changed from auto to forced */
2109			/* or vice-versa. */
2110
2111			bnx2_resolve_flow_ctrl(bp);
2112			bnx2_set_mac_link(bp);
2113		}
2114		return 0;
2115	}
2116
2117	/* advertise nothing when forcing speed */
2118	if (adv_reg != new_adv)
2119		bnx2_write_phy(bp, bp->mii_adv, new_adv);
2120
2121	new_bmcr = 0;
2122	if (bp->req_line_speed == SPEED_100) {
2123		new_bmcr |= BMCR_SPEED100;
2124	}
2125	if (bp->req_duplex == DUPLEX_FULL) {
2126		new_bmcr |= BMCR_FULLDPLX;
2127	}
2128	if (new_bmcr != bmcr) {
2129		u32 bmsr;
2130
2131		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133
2134		if (bmsr & BMSR_LSTATUS) {
2135			/* Force link down */
2136			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2137			spin_unlock_bh(&bp->phy_lock);
2138			msleep(50);
2139			spin_lock_bh(&bp->phy_lock);
2140
2141			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143		}
2144
2145		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2146
 2147		/* Normally, the new speed is set up after the link has
2148		 * gone down and up again. In some cases, link will not go
2149		 * down so we need to set up the new speed here.
2150		 */
2151		if (bmsr & BMSR_LSTATUS) {
2152			bp->line_speed = bp->req_line_speed;
2153			bp->duplex = bp->req_duplex;
2154			bnx2_resolve_flow_ctrl(bp);
2155			bnx2_set_mac_link(bp);
2156		}
2157	} else {
2158		bnx2_resolve_flow_ctrl(bp);
2159		bnx2_set_mac_link(bp);
2160	}
2161	return 0;
2162}
2163
2164static int
2165bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166__releases(&bp->phy_lock)
2167__acquires(&bp->phy_lock)
2168{
2169	if (bp->loopback == MAC_LOOPBACK)
2170		return 0;
2171
2172	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173		return bnx2_setup_serdes_phy(bp, port);
2174	}
2175	else {
2176		return bnx2_setup_copper_phy(bp);
2177	}
2178}
2179
2180static int
2181bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2182{
2183	u32 val;
2184
2185	bp->mii_bmcr = MII_BMCR + 0x10;
2186	bp->mii_bmsr = MII_BMSR + 0x10;
2187	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2188	bp->mii_adv = MII_ADVERTISE + 0x10;
2189	bp->mii_lpa = MII_LPA + 0x10;
2190	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191
2192	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2193	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194
2195	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196	if (reset_phy)
2197		bnx2_reset_phy(bp);
2198
2199	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200
2201	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2202	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2203	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2204	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205
2206	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2207	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2208	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2209		val |= BCM5708S_UP1_2G5;
2210	else
2211		val &= ~BCM5708S_UP1_2G5;
2212	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213
2214	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2215	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2217	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218
2219	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2220
2221	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2222	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2223	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224
2225	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2226
2227	return 0;
2228}
2229
2230static int
2231bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232{
2233	u32 val;
2234
2235	if (reset_phy)
2236		bnx2_reset_phy(bp);
2237
2238	bp->mii_up1 = BCM5708S_UP1;
2239
2240	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2241	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2242	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243
2244	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2246	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247
2248	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2250	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251
2252	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2253		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254		val |= BCM5708S_UP1_2G5;
2255		bnx2_write_phy(bp, BCM5708S_UP1, val);
2256	}
2257
2258	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2259	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2260	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2261		/* increase tx signal amplitude */
2262		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263			       BCM5708S_BLK_ADDR_TX_MISC);
2264		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2266		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2267		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2268	}
2269
2270	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2271	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2272
2273	if (val) {
2274		u32 is_backplane;
2275
2276		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2278			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279				       BCM5708S_BLK_ADDR_TX_MISC);
2280			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2281			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2282				       BCM5708S_BLK_ADDR_DIG);
2283		}
2284	}
2285	return 0;
2286}
2287
2288static int
2289bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2290{
2291	if (reset_phy)
2292		bnx2_reset_phy(bp);
2293
2294	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295
2296	if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2297		BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298
2299	if (bp->dev->mtu > ETH_DATA_LEN) {
2300		u32 val;
2301
2302		/* Set extended packet length bit */
2303		bnx2_write_phy(bp, 0x18, 0x7);
2304		bnx2_read_phy(bp, 0x18, &val);
2305		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306
2307		bnx2_write_phy(bp, 0x1c, 0x6c00);
2308		bnx2_read_phy(bp, 0x1c, &val);
2309		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2310	}
2311	else {
2312		u32 val;
2313
2314		bnx2_write_phy(bp, 0x18, 0x7);
2315		bnx2_read_phy(bp, 0x18, &val);
2316		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317
2318		bnx2_write_phy(bp, 0x1c, 0x6c00);
2319		bnx2_read_phy(bp, 0x1c, &val);
2320		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321	}
2322
2323	return 0;
2324}
2325
2326static int
2327bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328{
2329	u32 val;
2330
2331	if (reset_phy)
2332		bnx2_reset_phy(bp);
2333
2334	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2335		bnx2_write_phy(bp, 0x18, 0x0c00);
2336		bnx2_write_phy(bp, 0x17, 0x000a);
2337		bnx2_write_phy(bp, 0x15, 0x310b);
2338		bnx2_write_phy(bp, 0x17, 0x201f);
2339		bnx2_write_phy(bp, 0x15, 0x9506);
2340		bnx2_write_phy(bp, 0x17, 0x401f);
2341		bnx2_write_phy(bp, 0x15, 0x14e2);
2342		bnx2_write_phy(bp, 0x18, 0x0400);
2343	}
2344
2345	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2346		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2348		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349		val &= ~(1 << 8);
2350		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2351	}
2352
2353	if (bp->dev->mtu > ETH_DATA_LEN) {
2354		/* Set extended packet length bit */
2355		bnx2_write_phy(bp, 0x18, 0x7);
2356		bnx2_read_phy(bp, 0x18, &val);
2357		bnx2_write_phy(bp, 0x18, val | 0x4000);
2358
2359		bnx2_read_phy(bp, 0x10, &val);
2360		bnx2_write_phy(bp, 0x10, val | 0x1);
2361	}
2362	else {
2363		bnx2_write_phy(bp, 0x18, 0x7);
2364		bnx2_read_phy(bp, 0x18, &val);
2365		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366
2367		bnx2_read_phy(bp, 0x10, &val);
2368		bnx2_write_phy(bp, 0x10, val & ~0x1);
2369	}
2370
2371	/* ethernet@wirespeed */
2372	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2373	bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
 2374	val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED;
2375
2376	/* auto-mdix */
2377	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
 2378		val |= AUX_CTL_MISC_CTL_AUTOMDIX;
2379
2380	bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2381	return 0;
2382}
2383
2384
2385static int
2386bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2387__releases(&bp->phy_lock)
2388__acquires(&bp->phy_lock)
2389{
2390	u32 val;
2391	int rc = 0;
2392
2393	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2394	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2395
2396	bp->mii_bmcr = MII_BMCR;
2397	bp->mii_bmsr = MII_BMSR;
2398	bp->mii_bmsr1 = MII_BMSR;
2399	bp->mii_adv = MII_ADVERTISE;
2400	bp->mii_lpa = MII_LPA;
2401
2402	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2403
2404	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2405		goto setup_phy;
2406
2407	bnx2_read_phy(bp, MII_PHYSID1, &val);
2408	bp->phy_id = val << 16;
2409	bnx2_read_phy(bp, MII_PHYSID2, &val);
2410	bp->phy_id |= val & 0xffff;
2411
2412	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2413		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2414			rc = bnx2_init_5706s_phy(bp, reset_phy);
2415		else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2416			rc = bnx2_init_5708s_phy(bp, reset_phy);
2417		else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2418			rc = bnx2_init_5709s_phy(bp, reset_phy);
2419	}
2420	else {
2421		rc = bnx2_init_copper_phy(bp, reset_phy);
2422	}
2423
2424setup_phy:
2425	if (!rc)
2426		rc = bnx2_setup_phy(bp, bp->phy_port);
2427
2428	return rc;
2429}
2430
2431static int
2432bnx2_set_mac_loopback(struct bnx2 *bp)
2433{
2434	u32 mac_mode;
2435
2436	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2437	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2438	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2439	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2440	bp->link_up = 1;
2441	return 0;
2442}
2443
2444static int bnx2_test_link(struct bnx2 *);
2445
2446static int
2447bnx2_set_phy_loopback(struct bnx2 *bp)
2448{
2449	u32 mac_mode;
2450	int rc, i;
2451
2452	spin_lock_bh(&bp->phy_lock);
2453	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2454			    BMCR_SPEED1000);
2455	spin_unlock_bh(&bp->phy_lock);
2456	if (rc)
2457		return rc;
2458
2459	for (i = 0; i < 10; i++) {
2460		if (bnx2_test_link(bp) == 0)
2461			break;
2462		msleep(100);
2463	}
2464
2465	mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2466	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2467		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2468		      BNX2_EMAC_MODE_25G_MODE);
2469
2470	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2471	BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2472	bp->link_up = 1;
2473	return 0;
2474}
2475
2476static void
2477bnx2_dump_mcp_state(struct bnx2 *bp)
2478{
2479	struct net_device *dev = bp->dev;
2480	u32 mcp_p0, mcp_p1;
2481
2482	netdev_err(dev, "<--- start MCP states dump --->\n");
2483	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2484		mcp_p0 = BNX2_MCP_STATE_P0;
2485		mcp_p1 = BNX2_MCP_STATE_P1;
2486	} else {
2487		mcp_p0 = BNX2_MCP_STATE_P0_5708;
2488		mcp_p1 = BNX2_MCP_STATE_P1_5708;
2489	}
2490	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
2491		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2492	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
2493		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2494		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2495		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
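	/* The program counter is printed twice so that the two samples
	 * can be compared to tell a stalled MCP from one that is
	 * making progress.
	 */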
2496	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
2497		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2499		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2500	netdev_err(dev, "DEBUG: shmem states:\n");
2501	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
2502		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
2503		   bnx2_shmem_rd(bp, BNX2_FW_MB),
2504		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2505	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2506	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
2507		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2508		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2509	pr_cont(" condition[%08x]\n",
2510		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2511	DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2512	DP_SHMEM_LINE(bp, 0x3cc);
2513	DP_SHMEM_LINE(bp, 0x3dc);
2514	DP_SHMEM_LINE(bp, 0x3ec);
2515	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2516	netdev_err(dev, "<--- end MCP states dump --->\n");
2517}
2518
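/* Post a command to the bootcode through the DRV_MB mailbox, tagged
 * with an incrementing sequence number.  If @ack is set, poll FW_MB
 * for a matching acknowledgement for up to BNX2_FW_ACK_TIME_OUT_MS
 * and tell the firmware if we time out.
 */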
2519static int
2520bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2521{
2522	int i;
2523	u32 val;
2524
2525	bp->fw_wr_seq++;
2526	msg_data |= bp->fw_wr_seq;
2527	bp->fw_last_msg = msg_data;
2528
2529	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2530
2531	if (!ack)
2532		return 0;
2533
2534	/* wait for an acknowledgement. */
2535	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2536		msleep(10);
2537
2538		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2539
2540		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2541			break;
2542	}
2543	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2544		return 0;
2545
2546	/* If we timed out, inform the firmware that this is the case. */
2547	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2548		msg_data &= ~BNX2_DRV_MSG_CODE;
2549		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2550
2551		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2552		if (!silent) {
2553			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2554			bnx2_dump_mcp_state(bp);
2555		}
2556
2557		return -EBUSY;
2558	}
2559
2560	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2561		return -EIO;
2562
2563	return 0;
2564}
2565
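/* The 5709 keeps its context memory in host DMA pages rather than
 * on-chip.  Start the context memory initialization, then load each
 * page's DMA address into the host page table, polling for every
 * write request to complete.
 */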
2566static int
2567bnx2_init_5709_context(struct bnx2 *bp)
2568{
2569	int i, ret = 0;
2570	u32 val;
2571
2572	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2573	val |= (BNX2_PAGE_BITS - 8) << 16;
2574	BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2575	for (i = 0; i < 10; i++) {
2576		val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2577		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2578			break;
2579		udelay(2);
2580	}
2581	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2582		return -EBUSY;
2583
2584	for (i = 0; i < bp->ctx_pages; i++) {
2585		int j;
2586
2587		if (bp->ctx_blk[i])
2588			memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2589		else
2590			return -ENOMEM;
2591
2592		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2593			(bp->ctx_blk_mapping[i] & 0xffffffff) |
2594			BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2595		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2596			(u64) bp->ctx_blk_mapping[i] >> 32);
2597		BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2598			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2599		for (j = 0; j < 10; j++) {
2600
2601			val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2602			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2603				break;
2604			udelay(5);
2605		}
2606		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2607			ret = -EBUSY;
2608			break;
2609		}
2610	}
2611	return ret;
2612}
2613
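/* Zero all 96 contexts through the context window.  On the 5706 A0,
 * virtual CIDs with bit 3 set are first remapped to different physical
 * CIDs (the new_vcid computation below).
 */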
2614static void
2615bnx2_init_context(struct bnx2 *bp)
2616{
2617	u32 vcid;
2618
2619	vcid = 96;
2620	while (vcid) {
2621		u32 vcid_addr, pcid_addr, offset;
2622		int i;
2623
2624		vcid--;
2625
2626		if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2627			u32 new_vcid;
2628
2629			vcid_addr = GET_PCID_ADDR(vcid);
2630			if (vcid & 0x8) {
2631				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2632			}
2633			else {
2634				new_vcid = vcid;
2635			}
2636			pcid_addr = GET_PCID_ADDR(new_vcid);
2637		}
2638		else {
 2639			vcid_addr = GET_CID_ADDR(vcid);
2640			pcid_addr = vcid_addr;
2641		}
2642
2643		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2644			vcid_addr += (i << PHY_CTX_SHIFT);
2645			pcid_addr += (i << PHY_CTX_SHIFT);
2646
2647			BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2648			BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2649
2650			/* Zero out the context. */
2651			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2652				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2653		}
2654	}
2655}
2656
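/* Work around bad on-chip RX buffer memory: have the firmware hand out
 * every free mbuf cluster, remember the good ones (bit 9 clear), and
 * free only those back so the bad blocks stay out of circulation.
 */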
2657static int
2658bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2659{
2660	u16 *good_mbuf;
2661	u32 good_mbuf_cnt;
2662	u32 val;
2663
2664	good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL);
2665	if (!good_mbuf)
2666		return -ENOMEM;
2667
2668	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2669		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2670
2671	good_mbuf_cnt = 0;
2672
2673	/* Allocate a bunch of mbufs and save the good ones in an array. */
2674	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2676		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2677				BNX2_RBUF_COMMAND_ALLOC_REQ);
2678
2679		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2680
2681		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2682
2683		/* The addresses with Bit 9 set are bad memory blocks. */
2684		if (!(val & (1 << 9))) {
2685			good_mbuf[good_mbuf_cnt] = (u16) val;
2686			good_mbuf_cnt++;
2687		}
2688
2689		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2690	}
2691
 2692	/* Free the good ones back to the mbuf pool, thus discarding
2693	 * all the bad ones. */
2694	while (good_mbuf_cnt) {
2695		good_mbuf_cnt--;
2696
2697		val = good_mbuf[good_mbuf_cnt];
2698		val = (val << 9) | val | 1;
2699
2700		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2701	}
2702	kfree(good_mbuf);
2703	return 0;
2704}
2705
2706static void
2707bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2708{
2709	u32 val;
2710
2711	val = (mac_addr[0] << 8) | mac_addr[1];
2712
2713	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2714
2715	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2716		(mac_addr[4] << 8) | mac_addr[5];
2717
2718	BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2719}
2720
2721static inline int
2722bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2723{
2724	dma_addr_t mapping;
2725	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2726	struct bnx2_rx_bd *rxbd =
2727		&rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2728	struct page *page = alloc_page(gfp);
2729
2730	if (!page)
2731		return -ENOMEM;
2732	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
 2733			       DMA_FROM_DEVICE);
2734	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2735		__free_page(page);
2736		return -EIO;
2737	}
2738
2739	rx_pg->page = page;
2740	dma_unmap_addr_set(rx_pg, mapping, mapping);
2741	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743	return 0;
2744}
2745
2746static void
2747bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2748{
2749	struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2750	struct page *page = rx_pg->page;
2751
2752	if (!page)
2753		return;
2754
2755	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
 2756		       PAGE_SIZE, DMA_FROM_DEVICE);
2757
2758	__free_page(page);
2759	rx_pg->page = NULL;
2760}
2761
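/* Allocate and DMA-map one RX data buffer and point its buffer
 * descriptor at the mapping.  The l2_fhdr written by the chip lives in
 * the buffer headroom, so the mapping starts at get_l2_fhdr(data).
 */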
2762static inline int
2763bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2764{
2765	u8 *data;
2766	struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2767	dma_addr_t mapping;
2768	struct bnx2_rx_bd *rxbd =
2769		&rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2770
2771	data = kmalloc(bp->rx_buf_size, gfp);
2772	if (!data)
2773		return -ENOMEM;
2774
2775	mapping = dma_map_single(&bp->pdev->dev,
2776				 get_l2_fhdr(data),
2777				 bp->rx_buf_use_size,
 2778				 DMA_FROM_DEVICE);
2779	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2780		kfree(data);
2781		return -EIO;
2782	}
2783
2784	rx_buf->data = data;
2785	dma_unmap_addr_set(rx_buf, mapping, mapping);
2786
2787	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2788	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2789
2790	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2791
2792	return 0;
2793}
2794
2795static int
2796bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2797{
2798	struct status_block *sblk = bnapi->status_blk.msi;
2799	u32 new_link_state, old_link_state;
2800	int is_set = 1;
2801
2802	new_link_state = sblk->status_attn_bits & event;
2803	old_link_state = sblk->status_attn_bits_ack & event;
2804	if (new_link_state != old_link_state) {
2805		if (new_link_state)
2806			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807		else
2808			BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2809	} else
2810		is_set = 0;
2811
2812	return is_set;
2813}
2814
2815static void
2816bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2817{
2818	spin_lock(&bp->phy_lock);
2819
2820	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2821		bnx2_set_link(bp);
2822	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2823		bnx2_set_remote_link(bp);
2824
2825	spin_unlock(&bp->phy_lock);
2826
2827}
2828
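/* Read the TX consumer index from the status block.  The last entry of
 * each ring page is a link descriptor that the hardware skips, so step
 * past it when the index lands on that boundary.
 */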
2829static inline u16
2830bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2831{
2832	u16 cons;
2833
2834	cons = READ_ONCE(*bnapi->hw_tx_cons_ptr);
2835
2836	if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2837		cons++;
2838	return cons;
2839}
2840
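/* TX completion: walk the ring from the last software consumer index
 * up to the hardware one, unmapping and freeing each completed skb
 * (TSO packets can complete in stages, hence the partial-completion
 * check), then update BQL and wake the queue if it was stopped.
 */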
2841static int
2842bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2843{
2844	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2845	u16 hw_cons, sw_cons, sw_ring_cons;
2846	int tx_pkt = 0, index;
2847	unsigned int tx_bytes = 0;
2848	struct netdev_queue *txq;
2849
2850	index = (bnapi - bp->bnx2_napi);
2851	txq = netdev_get_tx_queue(bp->dev, index);
2852
2853	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2854	sw_cons = txr->tx_cons;
2855
2856	while (sw_cons != hw_cons) {
2857		struct bnx2_sw_tx_bd *tx_buf;
2858		struct sk_buff *skb;
2859		int i, last;
2860
2861		sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2862
2863		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2864		skb = tx_buf->skb;
2865
 2866		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
2867		prefetch(&skb->end);
2868
2869		/* partial BD completions possible with TSO packets */
2870		if (tx_buf->is_gso) {
2871			u16 last_idx, last_ring_idx;
2872
2873			last_idx = sw_cons + tx_buf->nr_frags + 1;
2874			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2875			if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2876				last_idx++;
2877			}
2878			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2879				break;
2880			}
2881		}
2882
2883		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
 2884			skb_headlen(skb), DMA_TO_DEVICE);
2885
2886		tx_buf->skb = NULL;
2887		last = tx_buf->nr_frags;
2888
2889		for (i = 0; i < last; i++) {
2890			struct bnx2_sw_tx_bd *tx_buf;
2891
2892			sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2893
2894			tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2895			dma_unmap_page(&bp->pdev->dev,
2896				dma_unmap_addr(tx_buf, mapping),
2897				skb_frag_size(&skb_shinfo(skb)->frags[i]),
 2898				DMA_TO_DEVICE);
2899		}
2900
2901		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2902
2903		tx_bytes += skb->len;
2904		dev_kfree_skb_any(skb);
2905		tx_pkt++;
2906		if (tx_pkt == budget)
2907			break;
2908
2909		if (hw_cons == sw_cons)
2910			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2911	}
2912
2913	netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
2914	txr->hw_tx_cons = hw_cons;
2915	txr->tx_cons = sw_cons;
2916
2917	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2918	 * before checking for netif_tx_queue_stopped().  Without the
2919	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2920	 * will miss it and cause the queue to be stopped forever.
2921	 */
2922	smp_mb();
2923
2924	if (unlikely(netif_tx_queue_stopped(txq)) &&
2925		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2926		__netif_tx_lock(txq, smp_processor_id());
2927		if ((netif_tx_queue_stopped(txq)) &&
2928		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2929			netif_tx_wake_queue(txq);
2930		__netif_tx_unlock(txq);
2931	}
2932
2933	return tx_pkt;
2934}
2935
2936static void
2937bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2938			struct sk_buff *skb, int count)
2939{
2940	struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2941	struct bnx2_rx_bd *cons_bd, *prod_bd;
2942	int i;
2943	u16 hw_prod, prod;
2944	u16 cons = rxr->rx_pg_cons;
2945
2946	cons_rx_pg = &rxr->rx_pg_ring[cons];
2947
2948	/* The caller was unable to allocate a new page to replace the
2949	 * last one in the frags array, so we need to recycle that page
2950	 * and then free the skb.
2951	 */
2952	if (skb) {
2953		struct page *page;
2954		struct skb_shared_info *shinfo;
2955
2956		shinfo = skb_shinfo(skb);
2957		shinfo->nr_frags--;
2958		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
2959		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);
2960
2961		cons_rx_pg->page = page;
2962		dev_kfree_skb(skb);
2963	}
2964
2965	hw_prod = rxr->rx_pg_prod;
2966
2967	for (i = 0; i < count; i++) {
2968		prod = BNX2_RX_PG_RING_IDX(hw_prod);
2969
2970		prod_rx_pg = &rxr->rx_pg_ring[prod];
2971		cons_rx_pg = &rxr->rx_pg_ring[cons];
2972		cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2973						[BNX2_RX_IDX(cons)];
2974		prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2975						[BNX2_RX_IDX(prod)];
2976
2977		if (prod != cons) {
2978			prod_rx_pg->page = cons_rx_pg->page;
2979			cons_rx_pg->page = NULL;
2980			dma_unmap_addr_set(prod_rx_pg, mapping,
2981				dma_unmap_addr(cons_rx_pg, mapping));
2982
2983			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2984			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2985
2986		}
2987		cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2988		hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2989	}
2990	rxr->rx_pg_prod = hw_prod;
2991	rxr->rx_pg_cons = cons;
2992}
2993
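/* Recycle an rx buffer that is not being passed up the stack: move the
 * data pointer and DMA mapping from the consumer slot back to the producer
 * slot so the hardware can refill the same buffer.
 */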
2994static inline void
2995bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2996		   u8 *data, u16 cons, u16 prod)
2997{
2998	struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2999	struct bnx2_rx_bd *cons_bd, *prod_bd;
3000
3001	cons_rx_buf = &rxr->rx_buf_ring[cons];
3002	prod_rx_buf = &rxr->rx_buf_ring[prod];
3003
3004	dma_sync_single_for_device(&bp->pdev->dev,
3005		dma_unmap_addr(cons_rx_buf, mapping),
3006		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
3007
3008	rxr->rx_prod_bseq += bp->rx_buf_use_size;
3009
3010	prod_rx_buf->data = data;
3011
3012	if (cons == prod)
3013		return;
3014
3015	dma_unmap_addr_set(prod_rx_buf, mapping,
3016			dma_unmap_addr(cons_rx_buf, mapping));
3017
3018	cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
3019	prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
3020	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
3021	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
3022}
3023
3024static struct sk_buff *
3025bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3026	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
3027	    u32 ring_idx)
3028{
3029	int err;
3030	u16 prod = ring_idx & 0xffff;
3031	struct sk_buff *skb;
3032
3033	err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3034	if (unlikely(err)) {
3035		bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3036error:
3037		if (hdr_len) {
3038			unsigned int raw_len = len + 4;
3039			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
3040
3041			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3042		}
3043		return NULL;
3044	}
3045
3046	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3047			 DMA_FROM_DEVICE);
3048	skb = slab_build_skb(data);
3049	if (!skb) {
3050		kfree(data);
3051		goto error;
3052	}
3053	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
3054	if (hdr_len == 0) {
3055		skb_put(skb, len);
3056		return skb;
3057	} else {
3058		unsigned int i, frag_len, frag_size, pages;
3059		struct bnx2_sw_pg *rx_pg;
3060		u16 pg_cons = rxr->rx_pg_cons;
3061		u16 pg_prod = rxr->rx_pg_prod;
3062
3063		frag_size = len + 4 - hdr_len;
3064		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3065		skb_put(skb, hdr_len);
3066
3067		for (i = 0; i < pages; i++) {
3068			dma_addr_t mapping_old;
3069
3070			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
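			/* frag_size still includes the 4-byte frame CRC
			 * (raw_len = len + 4).  If what remains is nothing but
			 * CRC bytes, recycle the unused pages and trim the CRC
			 * tail that spilled into the previous fragment.
			 */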
3071			if (unlikely(frag_len <= 4)) {
3072				unsigned int tail = 4 - frag_len;
3073
3074				rxr->rx_pg_cons = pg_cons;
3075				rxr->rx_pg_prod = pg_prod;
3076				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3077							pages - i);
3078				skb->len -= tail;
3079				if (i == 0) {
3080					skb->tail -= tail;
3081				} else {
3082					skb_frag_t *frag =
3083						&skb_shinfo(skb)->frags[i - 1];
3084					skb_frag_size_sub(frag, tail);
3085					skb->data_len -= tail;
3086				}
3087				return skb;
3088			}
3089			rx_pg = &rxr->rx_pg_ring[pg_cons];
3090
3091			/* Don't unmap yet.  If we're unable to allocate a new
3092			 * page, we need to recycle the page and the DMA addr.
3093			 */
3094			mapping_old = dma_unmap_addr(rx_pg, mapping);
3095			if (i == pages - 1)
3096				frag_len -= 4;
3097
3098			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3099			rx_pg->page = NULL;
3100
3101			err = bnx2_alloc_rx_page(bp, rxr,
3102						 BNX2_RX_PG_RING_IDX(pg_prod),
3103						 GFP_ATOMIC);
3104			if (unlikely(err)) {
3105				rxr->rx_pg_cons = pg_cons;
3106				rxr->rx_pg_prod = pg_prod;
3107				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3108							pages - i);
3109				return NULL;
3110			}
3111
3112			dma_unmap_page(&bp->pdev->dev, mapping_old,
3113			       PAGE_SIZE, DMA_FROM_DEVICE);
3114
3115			frag_size -= frag_len;
3116			skb->data_len += frag_len;
3117			skb->truesize += PAGE_SIZE;
3118			skb->len += frag_len;
3119
3120			pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3121			pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3122		}
3123		rxr->rx_pg_prod = pg_prod;
3124		rxr->rx_pg_cons = pg_cons;
3125	}
3126	return skb;
3127}
3128
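/* The last descriptor of each ring page is a pointer to the next page, not
 * a real BD, so step over it when the hardware consumer index lands on a
 * page boundary.
 */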
3129static inline u16
3130bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3131{
3132	u16 cons;
3133
3134	cons = READ_ONCE(*bnapi->hw_rx_cons_ptr);
3135
3136	if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3137		cons++;
3138	return cons;
3139}
3140
3141static int
3142bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3143{
3144	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3145	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3146	struct l2_fhdr *rx_hdr;
3147	int rx_pkt = 0, pg_ring_used = 0;
3148
3149	if (budget <= 0)
3150		return rx_pkt;
3151
3152	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3153	sw_cons = rxr->rx_cons;
3154	sw_prod = rxr->rx_prod;
3155
3156	/* Memory barrier necessary as speculative reads of the rx
3157	 * buffer can be ahead of the index in the status block
3158	 */
3159	rmb();
3160	while (sw_cons != hw_cons) {
3161		unsigned int len, hdr_len;
3162		u32 status;
3163		struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3164		struct sk_buff *skb;
3165		dma_addr_t dma_addr;
3166		u8 *data;
3167		u16 next_ring_idx;
3168
3169		sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3170		sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3171
3172		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3173		data = rx_buf->data;
3174		rx_buf->data = NULL;
3175
3176		rx_hdr = get_l2_fhdr(data);
3177		prefetch(rx_hdr);
3178
3179		dma_addr = dma_unmap_addr(rx_buf, mapping);
3180
3181		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3182			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3183			DMA_FROM_DEVICE);
3184
3185		next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3186		next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3187		prefetch(get_l2_fhdr(next_rx_buf->data));
3188
3189		len = rx_hdr->l2_fhdr_pkt_len;
3190		status = rx_hdr->l2_fhdr_status;
3191
3192		hdr_len = 0;
3193		if (status & L2_FHDR_STATUS_SPLIT) {
3194			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3195			pg_ring_used = 1;
3196		} else if (len > bp->rx_jumbo_thresh) {
3197			hdr_len = bp->rx_jumbo_thresh;
3198			pg_ring_used = 1;
3199		}
3200
3201		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3202				       L2_FHDR_ERRORS_PHY_DECODE |
3203				       L2_FHDR_ERRORS_ALIGNMENT |
3204				       L2_FHDR_ERRORS_TOO_SHORT |
3205				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3206
3207			bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3208					  sw_ring_prod);
3209			if (pg_ring_used) {
3210				int pages;
3211
3212				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3213
3214				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3215			}
3216			goto next_rx;
3217		}
3218
3219		len -= 4;
3220
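		/* For short frames it is cheaper to copy into a fresh skb and
		 * recycle the original data buffer in place.  The copy starts
		 * 6 bytes before the frame so the source stays word aligned
		 * (the "aligned copy" below); skb_reserve() then skips those
		 * extra bytes again.
		 */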
3221		if (len <= bp->rx_copy_thresh) {
3222			skb = netdev_alloc_skb(bp->dev, len + 6);
3223			if (!skb) {
3224				bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3225						  sw_ring_prod);
3226				goto next_rx;
3227			}
3228
3229			/* aligned copy */
3230			memcpy(skb->data,
3231			       (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
3232			       len + 6);
3233			skb_reserve(skb, 6);
3234			skb_put(skb, len);
3235
3236			bnx2_reuse_rx_data(bp, rxr, data,
3237				sw_ring_cons, sw_ring_prod);
3238
3239		} else {
3240			skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3241					  (sw_ring_cons << 16) | sw_ring_prod);
3242			if (!skb)
3243				goto next_rx;
3244		}
3245		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3246		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3247			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_hdr->l2_fhdr_vlan_tag);
3248
3249		skb->protocol = eth_type_trans(skb, bp->dev);
3250
3251		if (len > (bp->dev->mtu + ETH_HLEN) &&
3252		    skb->protocol != htons(0x8100) &&
3253		    skb->protocol != htons(ETH_P_8021AD)) {
3254
3255			dev_kfree_skb(skb);
3256			goto next_rx;
3257
3258		}
3259
3260		skb_checksum_none_assert(skb);
3261		if ((bp->dev->features & NETIF_F_RXCSUM) &&
3262			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3263			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3264
3265			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3266					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3267				skb->ip_summed = CHECKSUM_UNNECESSARY;
3268		}
3269		if ((bp->dev->features & NETIF_F_RXHASH) &&
3270		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3271		     L2_FHDR_STATUS_USE_RXHASH))
3272			skb_set_hash(skb, rx_hdr->l2_fhdr_hash,
3273				     PKT_HASH_TYPE_L3);
3274
3275		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3276		napi_gro_receive(&bnapi->napi, skb);
3277		rx_pkt++;
3278
3279next_rx:
3280		sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3281		sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3282
3283		if (rx_pkt == budget)
3284			break;
3285
3286		/* Refresh hw_cons to see if there is new work */
3287		if (sw_cons == hw_cons) {
3288			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3289			rmb();
3290		}
3291	}
3292	rxr->rx_cons = sw_cons;
3293	rxr->rx_prod = sw_prod;
3294
3295	if (pg_ring_used)
3296		BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3297
3298	BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3299
3300	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3301
3302	return rx_pkt;
3303
3304}
3305
3306/* MSI ISR - The only difference between this and the INTx ISR
3307 * is that the MSI interrupt is always serviced.
3308 */
3309static irqreturn_t
3310bnx2_msi(int irq, void *dev_instance)
3311{
3312	struct bnx2_napi *bnapi = dev_instance;
3313	struct bnx2 *bp = bnapi->bp;
3314
3315	prefetch(bnapi->status_blk.msi);
3316	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3317		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3318		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3319
3320	/* Return here if interrupt is disabled. */
3321	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3322		return IRQ_HANDLED;
3323
3324	napi_schedule(&bnapi->napi);
3325
3326	return IRQ_HANDLED;
3327}
3328
3329static irqreturn_t
3330bnx2_msi_1shot(int irq, void *dev_instance)
3331{
3332	struct bnx2_napi *bnapi = dev_instance;
3333	struct bnx2 *bp = bnapi->bp;
3334
3335	prefetch(bnapi->status_blk.msi);
3336
3337	/* Return here if interrupt is disabled. */
3338	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3339		return IRQ_HANDLED;
3340
3341	napi_schedule(&bnapi->napi);
3342
3343	return IRQ_HANDLED;
3344}
3345
3346static irqreturn_t
3347bnx2_interrupt(int irq, void *dev_instance)
3348{
3349	struct bnx2_napi *bnapi = dev_instance;
3350	struct bnx2 *bp = bnapi->bp;
3351	struct status_block *sblk = bnapi->status_blk.msi;
3352
3353	/* When using INTx, it is possible for the interrupt to arrive
3354	 * at the CPU before the status block posted prior to the
3355	 * interrupt. Reading a register will flush the status block.
3356	 * When using MSI, the MSI message will always complete after
3357	 * the status block write.
3358	 */
3359	if ((sblk->status_idx == bnapi->last_status_idx) &&
3360	    (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3361	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3362		return IRQ_NONE;
3363
3364	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3365		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3366		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3367
3368	/* Read back to deassert IRQ immediately to avoid too many
3369	 * spurious interrupts.
3370	 */
3371	BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3372
3373	/* Return here if interrupt is shared and is disabled. */
3374	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3375		return IRQ_HANDLED;
3376
3377	if (napi_schedule_prep(&bnapi->napi)) {
3378		bnapi->last_status_idx = sblk->status_idx;
3379		__napi_schedule(&bnapi->napi);
3380	}
3381
3382	return IRQ_HANDLED;
3383}
3384
3385static inline int
3386bnx2_has_fast_work(struct bnx2_napi *bnapi)
3387{
3388	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3389	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3390
3391	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3392	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3393		return 1;
3394	return 0;
3395}
3396
3397#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3398				 STATUS_ATTN_BITS_TIMER_ABORT)
3399
3400static inline int
3401bnx2_has_work(struct bnx2_napi *bnapi)
3402{
3403	struct status_block *sblk = bnapi->status_blk.msi;
3404
3405	if (bnx2_has_fast_work(bnapi))
3406		return 1;
3407
3408#ifdef BCM_CNIC
3409	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3410		return 1;
3411#endif
3412
3413	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3414	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3415		return 1;
3416
3417	return 0;
3418}
3419
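/* Called periodically from the driver timer to catch an MSI the chip
 * appears to have missed: if work is pending but the status index has not
 * moved since the last idle check, bounce the MSI enable bit and invoke
 * the handler directly to kick NAPI.
 */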
3420static void
3421bnx2_chk_missed_msi(struct bnx2 *bp)
3422{
3423	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3424	u32 msi_ctrl;
3425
3426	if (bnx2_has_work(bnapi)) {
3427		msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3428		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3429			return;
3430
3431		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3432			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3433				~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3434			BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3435			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3436		}
3437	}
3438
3439	bp->idle_chk_status_idx = bnapi->last_status_idx;
3440}
3441
3442#ifdef BCM_CNIC
3443static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3444{
3445	struct cnic_ops *c_ops;
3446
3447	if (!bnapi->cnic_present)
3448		return;
3449
3450	rcu_read_lock();
3451	c_ops = rcu_dereference(bp->cnic_ops);
3452	if (c_ops)
3453		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3454						      bnapi->status_blk.msi);
3455	rcu_read_unlock();
3456}
3457#endif
3458
3459static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3460{
3461	struct status_block *sblk = bnapi->status_blk.msi;
3462	u32 status_attn_bits = sblk->status_attn_bits;
3463	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3464
3465	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3466	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3467
3468		bnx2_phy_int(bp, bnapi);
3469
3470		/* This is needed to take care of transient status
3471		 * during link changes.
3472		 */
3473		BNX2_WR(bp, BNX2_HC_COMMAND,
3474			bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3475		BNX2_RD(bp, BNX2_HC_COMMAND);
3476	}
3477}
3478
3479static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3480			  int work_done, int budget)
3481{
3482	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3483	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3484
3485	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3486		bnx2_tx_int(bp, bnapi, 0);
3487
3488	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3489		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3490
3491	return work_done;
3492}
3493
3494static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3495{
3496	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3497	struct bnx2 *bp = bnapi->bp;
3498	int work_done = 0;
3499	struct status_block_msix *sblk = bnapi->status_blk.msix;
3500
3501	while (1) {
3502		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3503		if (unlikely(work_done >= budget))
3504			break;
3505
3506		bnapi->last_status_idx = sblk->status_idx;
3507		/* status idx must be read before checking for more work. */
3508		rmb();
3509		if (likely(!bnx2_has_fast_work(bnapi))) {
3510
3511			napi_complete_done(napi, work_done);
3512			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3513				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3514				bnapi->last_status_idx);
3515			break;
3516		}
3517	}
3518	return work_done;
3519}
3520
3521static int bnx2_poll(struct napi_struct *napi, int budget)
3522{
3523	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3524	struct bnx2 *bp = bnapi->bp;
3525	int work_done = 0;
3526	struct status_block *sblk = bnapi->status_blk.msi;
3527
3528	while (1) {
3529		bnx2_poll_link(bp, bnapi);
3530
3531		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3532
3533#ifdef BCM_CNIC
3534		bnx2_poll_cnic(bp, bnapi);
3535#endif
3536
3537		/* bnapi->last_status_idx is used below to tell the hw how
3538		 * much work has been processed, so we must read it before
3539		 * checking for more work.
3540		 */
3541		bnapi->last_status_idx = sblk->status_idx;
3542
3543		if (unlikely(work_done >= budget))
3544			break;
3545
3546		rmb();
3547		if (likely(!bnx2_has_work(bnapi))) {
3548			napi_complete_done(napi, work_done);
3549			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3550				BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3551					BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3552					bnapi->last_status_idx);
3553				break;
3554			}
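			/* INTx: write the new index once with MASK_INT still
			 * set, then again without it to re-enable the
			 * interrupt line.
			 */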
3555			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3556				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3557				BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3558				bnapi->last_status_idx);
3559
3560			BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3561				BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3562				bnapi->last_status_idx);
3563			break;
3564		}
3565	}
3566
3567	return work_done;
3568}
3569
3570/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3571 * from set_multicast.
3572 */
3573static void
3574bnx2_set_rx_mode(struct net_device *dev)
3575{
3576	struct bnx2 *bp = netdev_priv(dev);
3577	u32 rx_mode, sort_mode;
3578	struct netdev_hw_addr *ha;
3579	int i;
3580
3581	if (!netif_running(dev))
3582		return;
3583
3584	spin_lock_bh(&bp->phy_lock);
3585
3586	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3587				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3588	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3589	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3590	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3591		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3592	if (dev->flags & IFF_PROMISC) {
3593		/* Promiscuous mode. */
3594		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3595		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3596			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3597	}
3598	else if (dev->flags & IFF_ALLMULTI) {
3599		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3600			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3601				0xffffffff);
3602		}
3603		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3604	}
3605	else {
3606		/* Accept one or more multicast(s). */
3607		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3608		u32 regidx;
3609		u32 bit;
3610		u32 crc;
3611
3612		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3613
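		/* Hash each address into one of 256 filter bits using the low
		 * byte of the CRC32: bits 5-7 pick the register, bits 0-4 the
		 * bit within it.  e.g. a CRC ending in 0xb3 gives regidx 5,
		 * bit 19, so mc_filter[5] |= 1 << 19.
		 */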
3614		netdev_for_each_mc_addr(ha, dev) {
3615			crc = ether_crc_le(ETH_ALEN, ha->addr);
3616			bit = crc & 0xff;
3617			regidx = (bit & 0xe0) >> 5;
3618			bit &= 0x1f;
3619			mc_filter[regidx] |= (1 << bit);
3620		}
3621
3622		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3623			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3624				mc_filter[i]);
3625		}
3626
3627		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3628	}
3629
3630	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3631		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3632		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3633			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3634	} else if (!(dev->flags & IFF_PROMISC)) {
3635		/* Add all entries to the match filter list */
3636		i = 0;
3637		netdev_for_each_uc_addr(ha, dev) {
3638			bnx2_set_mac_addr(bp, ha->addr,
3639					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3640			sort_mode |= (1 <<
3641				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3642			i++;
3643		}
3644
3645	}
3646
3647	if (rx_mode != bp->rx_mode) {
3648		bp->rx_mode = rx_mode;
3649		BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3650	}
3651
3652	BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3653	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3654	BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3655
3656	spin_unlock_bh(&bp->phy_lock);
3657}
3658
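/* Validate one section header of a firmware file: the section must lie
 * entirely within the loaded blob, start dword-aligned, and have a length
 * that is a multiple of 'alignment' (and non-zero when 'non_empty' is set).
 */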
3659static int
3660check_fw_section(const struct firmware *fw,
3661		 const struct bnx2_fw_file_section *section,
3662		 u32 alignment, bool non_empty)
3663{
3664	u32 offset = be32_to_cpu(section->offset);
3665	u32 len = be32_to_cpu(section->len);
3666
3667	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3668		return -EINVAL;
3669	if ((non_empty && len == 0) || len > fw->size - offset ||
3670	    len & (alignment - 1))
3671		return -EINVAL;
3672	return 0;
3673}
3674
3675static int
3676check_mips_fw_entry(const struct firmware *fw,
3677		    const struct bnx2_mips_fw_file_entry *entry)
3678{
3679	if (check_fw_section(fw, &entry->text, 4, true) ||
3680	    check_fw_section(fw, &entry->data, 4, false) ||
3681	    check_fw_section(fw, &entry->rodata, 4, false))
3682		return -EINVAL;
3683	return 0;
3684}
3685
3686static void bnx2_release_firmware(struct bnx2 *bp)
3687{
3688	if (bp->rv2p_firmware) {
3689		release_firmware(bp->mips_firmware);
3690		release_firmware(bp->rv2p_firmware);
3691		bp->rv2p_firmware = NULL;
3692	}
3693}
3694
3695static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3696{
3697	const char *mips_fw_file, *rv2p_fw_file;
3698	const struct bnx2_mips_fw_file *mips_fw;
3699	const struct bnx2_rv2p_fw_file *rv2p_fw;
3700	int rc;
3701
3702	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3703		mips_fw_file = FW_MIPS_FILE_09;
3704		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3705		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3706			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3707		else
3708			rv2p_fw_file = FW_RV2P_FILE_09;
3709	} else {
3710		mips_fw_file = FW_MIPS_FILE_06;
3711		rv2p_fw_file = FW_RV2P_FILE_06;
3712	}
3713
3714	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3715	if (rc) {
3716		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3717		goto out;
3718	}
3719
3720	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3721	if (rc) {
3722		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3723		goto err_release_mips_firmware;
3724	}
3725	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3726	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3727	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3728	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3729	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3730	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3731	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3732	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3733		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3734		rc = -EINVAL;
3735		goto err_release_firmware;
3736	}
3737	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3738	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3739	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3740		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3741		rc = -EINVAL;
3742		goto err_release_firmware;
3743	}
3744out:
3745	return rc;
3746
3747err_release_firmware:
3748	release_firmware(bp->rv2p_firmware);
3749	bp->rv2p_firmware = NULL;
3750err_release_mips_firmware:
3751	release_firmware(bp->mips_firmware);
3752	goto out;
3753}
3754
3755static int bnx2_request_firmware(struct bnx2 *bp)
3756{
3757	return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3758}
3759
3760static u32
3761rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3762{
3763	switch (idx) {
3764	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3765		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3766		rv2p_code |= RV2P_BD_PAGE_SIZE;
3767		break;
3768	}
3769	return rv2p_code;
3770}
3771
3772static int
3773load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3774	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3775{
3776	u32 rv2p_code_len, file_offset;
3777	__be32 *rv2p_code;
3778	int i;
3779	u32 val, cmd, addr;
3780
3781	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3782	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3783
3784	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3785
3786	if (rv2p_proc == RV2P_PROC1) {
3787		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3788		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3789	} else {
3790		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3791		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3792	}
3793
3794	for (i = 0; i < rv2p_code_len; i += 8) {
3795		BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3796		rv2p_code++;
3797		BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3798		rv2p_code++;
3799
3800		val = (i / 8) | cmd;
3801		BNX2_WR(bp, addr, val);
3802	}
3803
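	/* Apply the fixup records.  Each RV2P instruction is two dwords; a
	 * fixup names the dword index 'loc' of an instruction's low word, so
	 * the patched 64-bit instruction is re-issued at index loc / 2.
	 */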
3804	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3805	for (i = 0; i < 8; i++) {
3806		u32 loc, code;
3807
3808		loc = be32_to_cpu(fw_entry->fixup[i]);
3809		if (loc && ((loc * 4) < rv2p_code_len)) {
3810			code = be32_to_cpu(*(rv2p_code + loc - 1));
3811			BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3812			code = be32_to_cpu(*(rv2p_code + loc));
3813			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3814			BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3815
3816			val = (loc / 2) | cmd;
3817			BNX2_WR(bp, addr, val);
3818		}
3819	}
3820
3821	/* Reset the processor, un-stall is done later. */
3822	if (rv2p_proc == RV2P_PROC1) {
3823		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3824	}
3825	else {
3826		BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3827	}
3828
3829	return 0;
3830}
3831
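/* Load one MIPS CPU image: halt the CPU, copy the text, data and rodata
 * sections into its scratchpad through the indirect register interface,
 * point the PC at the entry address, and restart it.
 */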
3832static int
3833load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3834	    const struct bnx2_mips_fw_file_entry *fw_entry)
3835{
3836	u32 addr, len, file_offset;
3837	__be32 *data;
3838	u32 offset;
3839	u32 val;
3840
3841	/* Halt the CPU. */
3842	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3843	val |= cpu_reg->mode_value_halt;
3844	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3845	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3846
3847	/* Load the Text area. */
3848	addr = be32_to_cpu(fw_entry->text.addr);
3849	len = be32_to_cpu(fw_entry->text.len);
3850	file_offset = be32_to_cpu(fw_entry->text.offset);
3851	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3852
3853	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3854	if (len) {
3855		int j;
3856
3857		for (j = 0; j < (len / 4); j++, offset += 4)
3858			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3859	}
3860
3861	/* Load the Data area. */
3862	addr = be32_to_cpu(fw_entry->data.addr);
3863	len = be32_to_cpu(fw_entry->data.len);
3864	file_offset = be32_to_cpu(fw_entry->data.offset);
3865	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3866
3867	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3868	if (len) {
3869		int j;
3870
3871		for (j = 0; j < (len / 4); j++, offset += 4)
3872			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3873	}
3874
3875	/* Load the Read-Only area. */
3876	addr = be32_to_cpu(fw_entry->rodata.addr);
3877	len = be32_to_cpu(fw_entry->rodata.len);
3878	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3879	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3880
3881	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3882	if (len) {
3883		int j;
3884
3885		for (j = 0; j < (len / 4); j++, offset += 4)
3886			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3887	}
3888
3889	/* Clear the pre-fetch instruction. */
3890	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891
3892	val = be32_to_cpu(fw_entry->start_addr);
3893	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894
3895	/* Start the CPU. */
3896	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3897	val &= ~cpu_reg->mode_value_halt;
3898	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3899	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3900
3901	return 0;
3902}
3903
3904static int
3905bnx2_init_cpus(struct bnx2 *bp)
3906{
3907	const struct bnx2_mips_fw_file *mips_fw =
3908		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3909	const struct bnx2_rv2p_fw_file *rv2p_fw =
3910		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3911	int rc;
3912
3913	/* Initialize the RV2P processor. */
3914	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3915	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3916
3917	/* Initialize the RX Processor. */
3918	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3919	if (rc)
3920		goto init_cpu_err;
3921
3922	/* Initialize the TX Processor. */
3923	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3924	if (rc)
3925		goto init_cpu_err;
3926
3927	/* Initialize the TX Patch-up Processor. */
3928	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3929	if (rc)
3930		goto init_cpu_err;
3931
3932	/* Initialize the Completion Processor. */
3933	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3934	if (rc)
3935		goto init_cpu_err;
3936
3937	/* Initialize the Command Processor. */
3938	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3939
3940init_cpu_err:
3941	return rc;
3942}
3943
3944static void
3945bnx2_setup_wol(struct bnx2 *bp)
3946{
3947	int i;
3948	u32 val, wol_msg;
3949
3950	if (bp->wol) {
3951		u32 advertising;
3952		u8 autoneg;
3953
3954		autoneg = bp->autoneg;
3955		advertising = bp->advertising;
3956
3957		if (bp->phy_port == PORT_TP) {
3958			bp->autoneg = AUTONEG_SPEED;
3959			bp->advertising = ADVERTISED_10baseT_Half |
3960				ADVERTISED_10baseT_Full |
3961				ADVERTISED_100baseT_Half |
3962				ADVERTISED_100baseT_Full |
3963				ADVERTISED_Autoneg;
3964		}
3965
3966		spin_lock_bh(&bp->phy_lock);
3967		bnx2_setup_phy(bp, bp->phy_port);
3968		spin_unlock_bh(&bp->phy_lock);
3969
3970		bp->autoneg = autoneg;
3971		bp->advertising = advertising;
3972
3973		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3974
3975		val = BNX2_RD(bp, BNX2_EMAC_MODE);
3976
3977		/* Enable port mode. */
3978		val &= ~BNX2_EMAC_MODE_PORT;
3979		val |= BNX2_EMAC_MODE_MPKT_RCVD |
3980		       BNX2_EMAC_MODE_ACPI_RCVD |
3981		       BNX2_EMAC_MODE_MPKT;
3982		if (bp->phy_port == PORT_TP) {
3983			val |= BNX2_EMAC_MODE_PORT_MII;
3984		} else {
3985			val |= BNX2_EMAC_MODE_PORT_GMII;
3986			if (bp->line_speed == SPEED_2500)
3987				val |= BNX2_EMAC_MODE_25G_MODE;
3988		}
3989
3990		BNX2_WR(bp, BNX2_EMAC_MODE, val);
3991
3992		/* receive all multicast */
3993		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3994			BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3995				0xffffffff);
3996		}
3997		BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3998
3999		val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
4000		BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
4001		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
4002		BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
4003
4004		/* Need to enable EMAC and RPM for WOL. */
4005		BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4006			BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4007			BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4008			BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4009
4010		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4011		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4012		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4013
4014		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4015	} else {
4016		wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4017	}
4018
4019	if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4020		u32 val;
4021
4022		wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
4023		if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4024			bnx2_fw_sync(bp, wol_msg, 1, 0);
4025			return;
4026		}
4027		/* Tell firmware not to power down the PHY yet, otherwise
4028		 * the chip will take a long time to respond to MMIO reads.
4029		 */
4030		val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4031		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4032			      val | BNX2_PORT_FEATURE_ASF_ENABLED);
4033		bnx2_fw_sync(bp, wol_msg, 1, 0);
4034		bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4035	}
4036
4037}
4038
4039static int
4040bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4041{
4042	switch (state) {
4043	case PCI_D0: {
4044		u32 val;
4045
4046		pci_enable_wake(bp->pdev, PCI_D0, false);
4047		pci_set_power_state(bp->pdev, PCI_D0);
4048
4049		val = BNX2_RD(bp, BNX2_EMAC_MODE);
4050		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
4051		val &= ~BNX2_EMAC_MODE_MPKT;
4052		BNX2_WR(bp, BNX2_EMAC_MODE, val);
4053
4054		val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4055		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4056		BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4057		break;
4058	}
4059	case PCI_D3hot: {
4060		bnx2_setup_wol(bp);
4061		pci_wake_from_d3(bp->pdev, bp->wol);
4062		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4063		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4064
4065			if (bp->wol)
4066				pci_set_power_state(bp->pdev, PCI_D3hot);
4067			break;
4068
4069		}
4070		if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4071			u32 val;
4072
4073			/* Tell firmware not to power down the PHY yet,
4074			 * otherwise the other port may not respond to
4075			 * MMIO reads.
4076			 */
4077			val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4078			val &= ~BNX2_CONDITION_PM_STATE_MASK;
4079			val |= BNX2_CONDITION_PM_STATE_UNPREP;
4080			bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4081		}
4082		pci_set_power_state(bp->pdev, PCI_D3hot);
4083
4084		/* No more memory access after this point until
4085		 * device is brought back to D0.
4086		 */
4087		break;
4088	}
4089	default:
4090		return -EINVAL;
4091	}
4092	return 0;
4093}
4094
4095static int
4096bnx2_acquire_nvram_lock(struct bnx2 *bp)
4097{
4098	u32 val;
4099	int j;
4100
4101	/* Request access to the flash interface. */
4102	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4103	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4104		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4105		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4106			break;
4107
4108		udelay(5);
4109	}
4110
4111	if (j >= NVRAM_TIMEOUT_COUNT)
4112		return -EBUSY;
4113
4114	return 0;
4115}
4116
4117static int
4118bnx2_release_nvram_lock(struct bnx2 *bp)
4119{
4120	int j;
4121	u32 val;
4122
4123	/* Relinquish nvram interface. */
4124	BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4125
4126	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4127		val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4128		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4129			break;
4130
4131		udelay(5);
4132	}
4133
4134	if (j >= NVRAM_TIMEOUT_COUNT)
4135		return -EBUSY;
4136
4137	return 0;
4138}
4139
4140
4141static int
4142bnx2_enable_nvram_write(struct bnx2 *bp)
4143{
4144	u32 val;
4145
4146	val = BNX2_RD(bp, BNX2_MISC_CFG);
4147	BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4148
4149	if (bp->flash_info->flags & BNX2_NV_WREN) {
4150		int j;
4151
4152		BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4153		BNX2_WR(bp, BNX2_NVM_COMMAND,
4154			BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4155
4156		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4157			udelay(5);
4158
4159			val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4160			if (val & BNX2_NVM_COMMAND_DONE)
4161				break;
4162		}
4163
4164		if (j >= NVRAM_TIMEOUT_COUNT)
4165			return -EBUSY;
4166	}
4167	return 0;
4168}
4169
4170static void
4171bnx2_disable_nvram_write(struct bnx2 *bp)
4172{
4173	u32 val;
4174
4175	val = BNX2_RD(bp, BNX2_MISC_CFG);
4176	BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4177}
4178
4179
4180static void
4181bnx2_enable_nvram_access(struct bnx2 *bp)
4182{
4183	u32 val;
4184
4185	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4186	/* Enable both bits, even on read. */
4187	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188		val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4189}
4190
4191static void
4192bnx2_disable_nvram_access(struct bnx2 *bp)
4193{
4194	u32 val;
4195
4196	val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4197	/* Disable both bits, even after read. */
4198	BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4199		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4200			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4201}
4202
4203static int
4204bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4205{
4206	u32 cmd;
4207	int j;
4208
4209	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4210		/* Buffered flash, no erase needed */
4211		return 0;
4212
4213	/* Build an erase command */
4214	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4215	      BNX2_NVM_COMMAND_DOIT;
4216
4217	/* Need to clear DONE bit separately. */
4218	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4219
4220	/* Address of the NVRAM to erase. */
4221	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4222
4223	/* Issue an erase command. */
4224	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4225
4226	/* Wait for completion. */
4227	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4228		u32 val;
4229
4230		udelay(5);
4231
4232		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4233		if (val & BNX2_NVM_COMMAND_DONE)
4234			break;
4235	}
4236
4237	if (j >= NVRAM_TIMEOUT_COUNT)
4238		return -EBUSY;
4239
4240	return 0;
4241}
4242
4243static int
4244bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4245{
4246	u32 cmd;
4247	int j;
4248
4249	/* Build the command word. */
4250	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4251
4252	/* Translate the offset for buffered flash; not needed for 5709. */
4253	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4254		offset = ((offset / bp->flash_info->page_size) <<
4255			   bp->flash_info->page_bits) +
4256			  (offset % bp->flash_info->page_size);
4257	}
4258
4259	/* Need to clear DONE bit separately. */
4260	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4261
4262	/* Address of the NVRAM to read from. */
4263	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4264
4265	/* Issue a read command. */
4266	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4267
4268	/* Wait for completion. */
4269	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4270		u32 val;
4271
4272		udelay(5);
4273
4274		val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4275		if (val & BNX2_NVM_COMMAND_DONE) {
4276			__be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4277			memcpy(ret_val, &v, 4);
4278			break;
4279		}
4280	}
4281	if (j >= NVRAM_TIMEOUT_COUNT)
4282		return -EBUSY;
4283
4284	return 0;
4285}
4286
4287
4288static int
4289bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4290{
4291	u32 cmd;
4292	__be32 val32;
4293	int j;
4294
4295	/* Build the command word. */
4296	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4297
4298	/* Translate the offset for buffered flash; not needed for 5709. */
4299	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4300		offset = ((offset / bp->flash_info->page_size) <<
4301			  bp->flash_info->page_bits) +
4302			 (offset % bp->flash_info->page_size);
4303	}
4304
4305	/* Need to clear DONE bit separately. */
4306	BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4307
4308	memcpy(&val32, val, 4);
4309
4310	/* Write the data. */
4311	BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4312
4313	/* Address of the NVRAM to write to. */
4314	BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4315
4316	/* Issue the write command. */
4317	BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4318
4319	/* Wait for completion. */
4320	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4321		udelay(5);
4322
4323		if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4324			break;
4325	}
4326	if (j >= NVRAM_TIMEOUT_COUNT)
4327		return -EBUSY;
4328
4329	return 0;
4330}
4331
4332static int
4333bnx2_init_nvram(struct bnx2 *bp)
4334{
4335	u32 val;
4336	int j, entry_count, rc = 0;
4337	const struct flash_spec *flash;
4338
4339	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4340		bp->flash_info = &flash_5709;
4341		goto get_flash_size;
4342	}
4343
4344	/* Determine the selected interface. */
4345	val = BNX2_RD(bp, BNX2_NVM_CFG1);
4346
4347	entry_count = ARRAY_SIZE(flash_table);
4348
4349	if (val & 0x40000000) {
4350
4351		/* Flash interface has been reconfigured */
4352		for (j = 0, flash = &flash_table[0]; j < entry_count;
4353		     j++, flash++) {
4354			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4355			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4356				bp->flash_info = flash;
4357				break;
4358			}
4359		}
4360	}
4361	else {
4362		u32 mask;
4363		/* Not yet reconfigured */
4364
4365		if (val & (1 << 23))
4366			mask = FLASH_BACKUP_STRAP_MASK;
4367		else
4368			mask = FLASH_STRAP_MASK;
4369
4370		for (j = 0, flash = &flash_table[0]; j < entry_count;
4371			j++, flash++) {
4372
4373			if ((val & mask) == (flash->strapping & mask)) {
4374				bp->flash_info = flash;
4375
4376				/* Request access to the flash interface. */
4377				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4378					return rc;
4379
4380				/* Enable access to flash interface */
4381				bnx2_enable_nvram_access(bp);
4382
4383				/* Reconfigure the flash interface */
4384				BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4385				BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4386				BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4387				BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4388
4389				/* Disable access to flash interface */
4390				bnx2_disable_nvram_access(bp);
4391				bnx2_release_nvram_lock(bp);
4392
4393				break;
4394			}
4395		}
4396	} /* if (val & 0x40000000) */
4397
4398	if (j == entry_count) {
4399		bp->flash_info = NULL;
4400		pr_alert("Unknown flash/EEPROM type\n");
4401		return -ENODEV;
4402	}
4403
4404get_flash_size:
4405	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4406	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4407	if (val)
4408		bp->flash_size = val;
4409	else
4410		bp->flash_size = bp->flash_info->total_size;
4411
4412	return rc;
4413}
4414
4415static int
4416bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4417		int buf_size)
4418{
4419	int rc = 0;
4420	u32 cmd_flags, offset32, len32, extra;
4421
4422	if (buf_size == 0)
4423		return 0;
4424
4425	/* Request access to the flash interface. */
4426	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4427		return rc;
4428
4429	/* Enable access to flash interface */
4430	bnx2_enable_nvram_access(bp);
4431
4432	len32 = buf_size;
4433	offset32 = offset;
4434	extra = 0;
4435
4436	cmd_flags = 0;
4437
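	/* NVRAM is accessed a dword at a time.  Handle a non-dword-aligned
	 * start by reading the surrounding dword and copying out only the
	 * bytes we need; a ragged tail is handled the same way below.
	 */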
4438	if (offset32 & 3) {
4439		u8 buf[4];
4440		u32 pre_len;
4441
4442		offset32 &= ~3;
4443		pre_len = 4 - (offset & 3);
4444
4445		if (pre_len >= len32) {
4446			pre_len = len32;
4447			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4448				    BNX2_NVM_COMMAND_LAST;
4449		}
4450		else {
4451			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4452		}
4453
4454		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4455
4456		if (rc)
4457			return rc;
4458
4459		memcpy(ret_buf, buf + (offset & 3), pre_len);
4460
4461		offset32 += 4;
4462		ret_buf += pre_len;
4463		len32 -= pre_len;
4464	}
4465	if (len32 & 3) {
4466		extra = 4 - (len32 & 3);
4467		len32 = (len32 + 4) & ~3;
4468	}
4469
4470	if (len32 == 4) {
4471		u8 buf[4];
4472
4473		if (cmd_flags)
4474			cmd_flags = BNX2_NVM_COMMAND_LAST;
4475		else
4476			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4477				    BNX2_NVM_COMMAND_LAST;
4478
4479		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4480
4481		memcpy(ret_buf, buf, 4 - extra);
4482	}
4483	else if (len32 > 0) {
4484		u8 buf[4];
4485
4486		/* Read the first word. */
4487		if (cmd_flags)
4488			cmd_flags = 0;
4489		else
4490			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4491
4492		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4493
4494		/* Advance to the next dword. */
4495		offset32 += 4;
4496		ret_buf += 4;
4497		len32 -= 4;
4498
4499		while (len32 > 4 && rc == 0) {
4500			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4501
4502			/* Advance to the next dword. */
4503			offset32 += 4;
4504			ret_buf += 4;
4505			len32 -= 4;
4506		}
4507
4508		if (rc)
4509			return rc;
4510
4511		cmd_flags = BNX2_NVM_COMMAND_LAST;
4512		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4513
4514		memcpy(ret_buf, buf, 4 - extra);
4515	}
4516
4517	/* Disable access to flash interface */
4518	bnx2_disable_nvram_access(bp);
4519
4520	bnx2_release_nvram_lock(bp);
4521
4522	return rc;
4523}
4524
4525static int
4526bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4527		int buf_size)
4528{
4529	u32 written, offset32, len32;
4530	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4531	int rc = 0;
4532	int align_start, align_end;
4533
4534	buf = data_buf;
4535	offset32 = offset;
4536	len32 = buf_size;
4537	align_start = align_end = 0;
4538
4539	if ((align_start = (offset32 & 3))) {
4540		offset32 &= ~3;
4541		len32 += align_start;
4542		if (len32 < 4)
4543			len32 = 4;
4544		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4545			return rc;
4546	}
4547
4548	if (len32 & 3) {
4549		align_end = 4 - (len32 & 3);
4550		len32 += align_end;
4551		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4552			return rc;
4553	}
4554
4555	if (align_start || align_end) {
4556		align_buf = kmalloc(len32, GFP_KERNEL);
4557		if (!align_buf)
4558			return -ENOMEM;
4559		if (align_start) {
4560			memcpy(align_buf, start, 4);
4561		}
4562		if (align_end) {
4563			memcpy(align_buf + len32 - 4, end, 4);
4564		}
4565		memcpy(align_buf + align_start, data_buf, buf_size);
4566		buf = align_buf;
4567	}
4568
4569	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4570		flash_buffer = kmalloc(264, GFP_KERNEL);
4571		if (!flash_buffer) {
4572			rc = -ENOMEM;
4573			goto nvram_write_end;
4574		}
4575	}
4576
4577	written = 0;
4578	while ((written < len32) && (rc == 0)) {
4579		u32 page_start, page_end, data_start, data_end;
4580		u32 addr, cmd_flags;
4581		int i;
4582
4583		/* Find the page_start addr */
4584		page_start = offset32 + written;
4585		page_start -= (page_start % bp->flash_info->page_size);
4586		/* Find the page_end addr */
4587		page_end = page_start + bp->flash_info->page_size;
4588		/* Find the data_start addr */
4589		data_start = (written == 0) ? offset32 : page_start;
4590		/* Find the data_end addr */
4591		data_end = (page_end > offset32 + len32) ?
4592			(offset32 + len32) : page_end;
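		/* e.g. page_size 256, writing 10 bytes at offset 0x105:
		 * page_start = 0x100, page_end = 0x200,
		 * data_start = 0x105, data_end = 0x10f.
		 */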
4593
4594		/* Request access to the flash interface. */
4595		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4596			goto nvram_write_end;
4597
4598		/* Enable access to flash interface */
4599		bnx2_enable_nvram_access(bp);
4600
4601		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4602		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4603			int j;
4604
4605			/* Read the whole page into the buffer
4606			 * (non-buffered flash only) */
4607			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4608				if (j == (bp->flash_info->page_size - 4)) {
4609					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4610				}
4611				rc = bnx2_nvram_read_dword(bp,
4612					page_start + j,
4613					&flash_buffer[j],
4614					cmd_flags);
4615
4616				if (rc)
4617					goto nvram_write_end;
4618
4619				cmd_flags = 0;
4620			}
4621		}
4622
4623		/* Enable writes to flash interface (unlock write-protect) */
4624		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4625			goto nvram_write_end;
4626
4627		/* Loop to write back the buffer data from page_start to
4628		 * data_start */
4629		i = 0;
4630		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4631			/* Erase the page */
4632			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4633				goto nvram_write_end;
4634
4635			/* Re-enable writes for the actual write */
4636			bnx2_enable_nvram_write(bp);
4637
4638			for (addr = page_start; addr < data_start;
4639				addr += 4, i += 4) {
4640
4641				rc = bnx2_nvram_write_dword(bp, addr,
4642					&flash_buffer[i], cmd_flags);
4643
4644				if (rc != 0)
4645					goto nvram_write_end;
4646
4647				cmd_flags = 0;
4648			}
4649		}
4650
4651		/* Loop to write the new data from data_start to data_end */
4652		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4653			if ((addr == page_end - 4) ||
4654				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4655				 (addr == data_end - 4))) {
4656
4657				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4658			}
4659			rc = bnx2_nvram_write_dword(bp, addr, buf,
4660				cmd_flags);
4661
4662			if (rc != 0)
4663				goto nvram_write_end;
4664
4665			cmd_flags = 0;
4666			buf += 4;
4667		}
4668
4669		/* Loop to write back the buffer data from data_end
4670		 * to page_end */
4671		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4672			for (addr = data_end; addr < page_end;
4673				addr += 4, i += 4) {
4674
4675				if (addr == page_end-4) {
4676					cmd_flags = BNX2_NVM_COMMAND_LAST;
4677				}
4678				rc = bnx2_nvram_write_dword(bp, addr,
4679					&flash_buffer[i], cmd_flags);
4680
4681				if (rc != 0)
4682					goto nvram_write_end;
4683
4684				cmd_flags = 0;
4685			}
4686		}
4687
4688		/* Disable writes to flash interface (lock write-protect) */
4689		bnx2_disable_nvram_write(bp);
4690
4691		/* Disable access to flash interface */
4692		bnx2_disable_nvram_access(bp);
4693		bnx2_release_nvram_lock(bp);
4694
4695		/* Increment written */
4696		written += data_end - data_start;
4697	}
4698
4699nvram_write_end:
4700	kfree(flash_buffer);
4701	kfree(align_buf);
4702	return rc;
4703}
4704
4705static void
4706bnx2_init_fw_cap(struct bnx2 *bp)
4707{
4708	u32 val, sig = 0;
4709
4710	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4711	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4712
4713	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4714		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4715
4716	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4717	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4718		return;
4719
4720	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4721		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4722		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4723	}
4724
4725	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4726	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4727		u32 link;
4728
4729		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4730
4731		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4732		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4733			bp->phy_port = PORT_FIBRE;
4734		else
4735			bp->phy_port = PORT_TP;
4736
4737		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4738		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4739	}
4740
4741	if (netif_running(bp->dev) && sig)
4742		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4743}
4744
4745static void
4746bnx2_setup_msix_tbl(struct bnx2 *bp)
4747{
4748	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4749
4750	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4751	BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4752}
4753
4754static void
4755bnx2_wait_dma_complete(struct bnx2 *bp)
4756{
4757	u32 val;
4758	int i;
4759
4760	/*
4761	 * Wait for the current PCI transaction to complete before
4762	 * issuing a reset.
4763	 */
4764	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4765	    (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4766		BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4767			BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4768			BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4769			BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4770			BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4771		val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4772		udelay(5);
4773	} else {  /* 5709 */
4774		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4775		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4776		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4777		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4778
4779		for (i = 0; i < 100; i++) {
4780			msleep(1);
4781			val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4782			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4783				break;
4784		}
4785	}
4786
4787	return;
4788}
4789
4790
4791static int
4792bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4793{
4794	u32 val;
4795	int i, rc = 0;
4796	u8 old_port;
4797
4798	/* Wait for the current PCI transaction to complete before
4799	 * issuing a reset. */
4800	bnx2_wait_dma_complete(bp);
4801
4802	/* Wait for the firmware to tell us it is ok to issue a reset. */
4803	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4804
4805	/* Deposit a driver reset signature so the firmware knows that
4806	 * this is a soft reset. */
4807	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4808		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4809
4810	/* Do a dummy read to force the chip to complete all current transactions
4811	 * before we issue a reset. */
4812	val = BNX2_RD(bp, BNX2_MISC_ID);
4813
4814	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4815		BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4816		BNX2_RD(bp, BNX2_MISC_COMMAND);
4817		udelay(5);
4818
4819		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4820		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4821
4822		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4823
4824	} else {
4825		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4826		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4827		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4828
4829		/* Chip reset. */
4830		BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4831
4832		/* Reading back any register after chip reset will hang the
4833		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4834		 * of margin for write posting.
4835		 */
4836		if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4837		    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4838			msleep(20);
4839
4840		/* Reset takes approximately 30 usec */
4841		for (i = 0; i < 10; i++) {
4842			val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4843			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4844				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4845				break;
4846			udelay(10);
4847		}
4848
4849		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4850			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4851			pr_err("Chip reset did not complete\n");
4852			return -EBUSY;
4853		}
4854	}
4855
4856	/* Make sure byte swapping is properly configured. */
4857	val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4858	if (val != 0x01020304) {
4859		pr_err("Chip not in correct endian mode\n");
4860		return -ENODEV;
4861	}
4862
4863	/* Wait for the firmware to finish its initialization. */
4864	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4865	if (rc)
4866		return rc;
4867
4868	spin_lock_bh(&bp->phy_lock);
4869	old_port = bp->phy_port;
4870	bnx2_init_fw_cap(bp);
4871	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4872	    old_port != bp->phy_port)
4873		bnx2_set_default_remote_link(bp);
4874	spin_unlock_bh(&bp->phy_lock);
4875
4876	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4877		/* Adjust the voltage regulator to two steps lower.  The default
4878		 * of this register is 0x0000000e. */
4879		BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4880
4881		/* Remove bad rbuf memory from the free pool. */
4882		rc = bnx2_alloc_bad_rbuf(bp);
4883	}
4884
4885	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4886		bnx2_setup_msix_tbl(bp);
4887		/* Prevent MSIX table reads and writes from timing out */
4888		BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4889			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4890	}
4891
4892	return rc;
4893}
4894
4895static int
4896bnx2_init_chip(struct bnx2 *bp)
4897{
4898	u32 val, mtu;
4899	int rc, i;
4900
4901	/* Make sure the interrupt is not active. */
4902	BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4903
4904	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4905	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4906#ifdef __BIG_ENDIAN
4907	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4908#endif
4909	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4910	      DMA_READ_CHANS << 12 |
4911	      DMA_WRITE_CHANS << 16;
4912
4913	val |= (0x2 << 20) | (1 << 11);
4914
4915	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4916		val |= (1 << 23);
4917
4918	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4919	    (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4920	    !(bp->flags & BNX2_FLAG_PCIX))
4921		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4922
4923	BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4924
4925	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4926		val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4927		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4928		BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4929	}
4930
4931	if (bp->flags & BNX2_FLAG_PCIX) {
4932		u16 val16;
4933
4934		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4935				     &val16);
4936		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4937				      val16 & ~PCI_X_CMD_ERO);
4938	}
4939
4940	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4941		BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4942		BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4943		BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4944
4945	/* Initialize context mapping and zero out the quick contexts.  The
4946	 * context block must have already been enabled. */
4947	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4948		rc = bnx2_init_5709_context(bp);
4949		if (rc)
4950			return rc;
4951	} else
4952		bnx2_init_context(bp);
4953
4954	if ((rc = bnx2_init_cpus(bp)) != 0)
4955		return rc;
4956
4957	bnx2_init_nvram(bp);
4958
4959	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4960
4961	val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4962	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4963	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4964	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4965		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4966		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4967			val |= BNX2_MQ_CONFIG_HALT_DIS;
4968	}
4969
4970	BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4971
4972	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4973	BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4974	BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4975
4976	val = (BNX2_PAGE_BITS - 8) << 24;
4977	BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4978
4979	/* Configure page size. */
4980	val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4981	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4982	val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4983	BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4984
4985	val = bp->mac_addr[0] +
4986	      (bp->mac_addr[1] << 8) +
4987	      (bp->mac_addr[2] << 16) +
4988	      bp->mac_addr[3] +
4989	      (bp->mac_addr[4] << 8) +
4990	      (bp->mac_addr[5] << 16);
4991	BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4992
4993	/* Program the MTU.  Also include 4 bytes for CRC32. */
4994	mtu = bp->dev->mtu;
4995	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4996	if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4))
4997		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4998	BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4999
5000	if (mtu < ETH_DATA_LEN)
5001		mtu = ETH_DATA_LEN;
5002
5003	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
5004	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
5005	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
5006
5007	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
5008	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5009		bp->bnx2_napi[i].last_status_idx = 0;
5010
5011	bp->idle_chk_status_idx = 0xffff;
5012
5013	/* Set up how to generate a link change interrupt. */
5014	BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
5015
5016	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5017		(u64) bp->status_blk_mapping & 0xffffffff);
5018	BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5019
5020	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5021		(u64) bp->stats_blk_mapping & 0xffffffff);
5022	BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5023		(u64) bp->stats_blk_mapping >> 32);
5024
5025	BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5026		(bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5027
5028	BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5029		(bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5030
5031	BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5032		(bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5033
5034	BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5035
5036	BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5037
5038	BNX2_WR(bp, BNX2_HC_COM_TICKS,
5039		(bp->com_ticks_int << 16) | bp->com_ticks);
5040
5041	BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5042		(bp->cmd_ticks_int << 16) | bp->cmd_ticks);
5043
5044	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5045		BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5046	else
5047		BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5048	BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
5049
5050	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5051		val = BNX2_HC_CONFIG_COLLECT_STATS;
5052	else {
5053		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
5054		      BNX2_HC_CONFIG_COLLECT_STATS;
5055	}
5056
5057	if (bp->flags & BNX2_FLAG_USING_MSIX) {
5058		BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5059			BNX2_HC_MSIX_BIT_VECTOR_VAL);
5060
5061		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
5062	}
5063
5064	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5065		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5066
5067	BNX2_WR(bp, BNX2_HC_CONFIG, val);
5068
5069	if (bp->rx_ticks < 25)
5070		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5071	else
5072		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5073
5074	for (i = 1; i < bp->irq_nvecs; i++) {
5075		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5076			   BNX2_HC_SB_CONFIG_1;
5077
5078		BNX2_WR(bp, base,
5079			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5080			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5081			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5082
5083		BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5084			(bp->tx_quick_cons_trip_int << 16) |
5085			 bp->tx_quick_cons_trip);
5086
5087		BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5088			(bp->tx_ticks_int << 16) | bp->tx_ticks);
5089
5090		BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5091			(bp->rx_quick_cons_trip_int << 16) |
5092			bp->rx_quick_cons_trip);
5093
5094		BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5095			(bp->rx_ticks_int << 16) | bp->rx_ticks);
5096	}
5097
5098	/* Clear internal stats counters. */
5099	BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5100
5101	BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5102
5103	/* Initialize the receive filter. */
5104	bnx2_set_rx_mode(bp->dev);
5105
5106	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5107		val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5108		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5109		BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5110	}
5111	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5112			  1, 0);
5113
5114	BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5115	BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5116
5117	udelay(20);
5118
5119	bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5120
5121	return rc;
5122}
5123
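/* Editor's illustration (not part of the driver): the EMAC backoff seed
 * programmed above mixes the six MAC address bytes into one 32-bit value
 * so each port gets a distinct collision-backoff seed.  A minimal sketch,
 * assuming a 6-byte MAC as in the code above:
 */
static inline u32 bnx2_demo_backoff_seed(const u8 *mac)
{
	return mac[0] + (mac[1] << 8) + (mac[2] << 16) +
	       mac[3] + (mac[4] << 8) + (mac[5] << 16);
}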
5124static void
5125bnx2_clear_ring_states(struct bnx2 *bp)
5126{
5127	struct bnx2_napi *bnapi;
5128	struct bnx2_tx_ring_info *txr;
5129	struct bnx2_rx_ring_info *rxr;
5130	int i;
5131
5132	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5133		bnapi = &bp->bnx2_napi[i];
5134		txr = &bnapi->tx_ring;
5135		rxr = &bnapi->rx_ring;
5136
5137		txr->tx_cons = 0;
5138		txr->hw_tx_cons = 0;
5139		rxr->rx_prod_bseq = 0;
5140		rxr->rx_prod = 0;
5141		rxr->rx_cons = 0;
5142		rxr->rx_pg_prod = 0;
5143		rxr->rx_pg_cons = 0;
5144	}
5145}
5146
5147static void
5148bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5149{
5150	u32 val, offset0, offset1, offset2, offset3;
5151	u32 cid_addr = GET_CID_ADDR(cid);
5152
5153	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5154		offset0 = BNX2_L2CTX_TYPE_XI;
5155		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5156		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5157		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5158	} else {
5159		offset0 = BNX2_L2CTX_TYPE;
5160		offset1 = BNX2_L2CTX_CMD_TYPE;
5161		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5162		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5163	}
5164	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5165	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5166
5167	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5168	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5169
5170	val = (u64) txr->tx_desc_mapping >> 32;
5171	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5172
5173	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5174	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5175}
5176
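/* Editor's illustration (not part of the driver): the hi/lo split used
 * above, and throughout this file, programs a 64-bit DMA address into a
 * pair of 32-bit context words.  A minimal sketch:
 */
static inline void bnx2_demo_split_dma_addr(dma_addr_t map, u32 *hi, u32 *lo)
{
	*hi = (u64) map >> 32;
	*lo = (u64) map & 0xffffffff;
}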
5177static void
5178bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5179{
5180	struct bnx2_tx_bd *txbd;
5181	u32 cid = TX_CID;
5182	struct bnx2_napi *bnapi;
5183	struct bnx2_tx_ring_info *txr;
5184
5185	bnapi = &bp->bnx2_napi[ring_num];
5186	txr = &bnapi->tx_ring;
5187
5188	if (ring_num == 0)
5189		cid = TX_CID;
5190	else
5191		cid = TX_TSS_CID + ring_num - 1;
5192
5193	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5194
5195	txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5196
5197	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5198	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5199
5200	txr->tx_prod = 0;
5201	txr->tx_prod_bseq = 0;
5202
5203	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5204	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5205
5206	bnx2_init_tx_context(bp, cid, txr);
5207}
5208
5209static void
5210bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5211		     u32 buf_size, int num_rings)
5212{
5213	int i;
5214	struct bnx2_rx_bd *rxbd;
5215
5216	for (i = 0; i < num_rings; i++) {
5217		int j;
5218
5219		rxbd = &rx_ring[i][0];
5220		for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5221			rxbd->rx_bd_len = buf_size;
5222			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5223		}
5224		if (i == (num_rings - 1))
5225			j = 0;
5226		else
5227			j = i + 1;
5228		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5229		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5230	}
5231}
5232
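/* Editor's illustration (not part of the driver): the last descriptor of
 * each ring page above is a chain pointer to the next page, and the final
 * page points back to page 0 so the hardware sees one circular ring.  A
 * minimal sketch of the next-page choice made in the loop above:
 */
static inline int bnx2_demo_next_ring_page(int i, int num_rings)
{
	return (i == num_rings - 1) ? 0 : i + 1;
}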
5233static void
5234bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5235{
5236	int i;
5237	u16 prod, ring_prod;
5238	u32 cid, rx_cid_addr, val;
5239	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5240	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5241
5242	if (ring_num == 0)
5243		cid = RX_CID;
5244	else
5245		cid = RX_RSS_CID + ring_num - 1;
5246
5247	rx_cid_addr = GET_CID_ADDR(cid);
5248
5249	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5250			     bp->rx_buf_use_size, bp->rx_max_ring);
5251
5252	bnx2_init_rx_context(bp, cid);
5253
5254	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5255		val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5256		BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5257	}
5258
5259	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5260	if (bp->rx_pg_ring_size) {
5261		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5262				     rxr->rx_pg_desc_mapping,
5263				     PAGE_SIZE, bp->rx_max_pg_ring);
5264		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5265		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5266		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5267		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5268
5269		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5270		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5271
5272		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5273		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5274
5275		if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5276			BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5277	}
5278
5279	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5280	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5281
5282	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5283	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5284
5285	ring_prod = prod = rxr->rx_pg_prod;
5286	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5287		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5288			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5289				    ring_num, i, bp->rx_pg_ring_size);
5290			break;
5291		}
5292		prod = BNX2_NEXT_RX_BD(prod);
5293		ring_prod = BNX2_RX_PG_RING_IDX(prod);
5294	}
5295	rxr->rx_pg_prod = prod;
5296
5297	ring_prod = prod = rxr->rx_prod;
5298	for (i = 0; i < bp->rx_ring_size; i++) {
5299		if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5300			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5301				    ring_num, i, bp->rx_ring_size);
5302			break;
5303		}
5304		prod = BNX2_NEXT_RX_BD(prod);
5305		ring_prod = BNX2_RX_RING_IDX(prod);
5306	}
5307	rxr->rx_prod = prod;
5308
5309	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5310	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5311	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5312
5313	BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5314	BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5315
5316	BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5317}
5318
5319static void
5320bnx2_init_all_rings(struct bnx2 *bp)
5321{
5322	int i;
5323	u32 val;
5324
5325	bnx2_clear_ring_states(bp);
5326
5327	BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5328	for (i = 0; i < bp->num_tx_rings; i++)
5329		bnx2_init_tx_ring(bp, i);
5330
5331	if (bp->num_tx_rings > 1)
5332		BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5333			(TX_TSS_CID << 7));
5334
5335	BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5336	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5337
5338	for (i = 0; i < bp->num_rx_rings; i++)
5339		bnx2_init_rx_ring(bp, i);
5340
5341	if (bp->num_rx_rings > 1) {
5342		u32 tbl_32 = 0;
5343
5344		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5345			int shift = (i % 8) << 2;
5346
5347			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5348			if ((i % 8) == 7) {
5349				BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5350				BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5351					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5352					BNX2_RLUP_RSS_COMMAND_WRITE |
5353					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
5354				tbl_32 = 0;
5355			}
5356		}
5357
5358		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5359		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5360
5361		BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5362
5363	}
5364}
5365
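/* Editor's illustration (not part of the driver): the RSS indirection
 * table above packs eight 4-bit ring indices into each 32-bit word,
 * spreading entries round-robin across the non-default rx rings.  A
 * minimal sketch of one packed word, assuming num_rx_rings > 1 as in the
 * code above:
 */
static inline u32 bnx2_demo_rss_tbl_word(int base_entry, int num_rx_rings)
{
	u32 word = 0;
	int i;

	for (i = 0; i < 8; i++)
		word |= ((base_entry + i) % (num_rx_rings - 1)) << (i << 2);
	return word;
}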
5366static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5367{
5368	u32 max, num_rings = 1;
5369
5370	while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5371		ring_size -= BNX2_MAX_RX_DESC_CNT;
5372		num_rings++;
5373	}
5374	/* round to next power of 2 */
5375	max = max_size;
5376	while ((max & num_rings) == 0)
5377		max >>= 1;
5378
5379	if (num_rings != max)
5380		max <<= 1;
5381
5382	return max;
5383}
5384
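/* Editor's note: a worked example of the rounding above, assuming 4 KiB
 * ring pages with 255 usable rx BDs each (BNX2_MAX_RX_DESC_CNT) and a cap
 * of 16 pages: 600 BDs span 255 + 255 + 90, i.e. 3 pages, which is then
 * rounded up to the next power of two.
 */
static inline u32 bnx2_demo_ring_pages(void)
{
	return bnx2_find_max_ring(600, 16);	/* -> 4 pages */
}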
5385static void
5386bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5387{
5388	u32 rx_size, rx_space, jumbo_size;
5389
5390	/* 8 for CRC and VLAN */
5391	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5392
5393	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5394		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5395
5396	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5397	bp->rx_pg_ring_size = 0;
5398	bp->rx_max_pg_ring = 0;
5399	bp->rx_max_pg_ring_idx = 0;
5400	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5401		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5402
5403		jumbo_size = size * pages;
5404		if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5405			jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5406
5407		bp->rx_pg_ring_size = jumbo_size;
5408		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5409							BNX2_MAX_RX_PG_RINGS);
5410		bp->rx_max_pg_ring_idx =
5411			(bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5412		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5413		bp->rx_copy_thresh = 0;
5414	}
5415
5416	bp->rx_buf_use_size = rx_size;
5417	/* hw alignment + build_skb() overhead */
5418	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5419		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5420	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5421	bp->rx_ring_size = size;
5422	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5423	bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5424}
5425
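/* Editor's illustration (not part of the driver): the sizing above must
 * cover the frame itself plus hardware alignment and the build_skb()
 * bookkeeping.  A minimal sketch of that arithmetic, assuming the same
 * constants used in the function above:
 */
static inline u32 bnx2_demo_rx_space(u32 mtu)
{
	u32 rx_size = mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;	/* CRC + VLAN */

	return SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}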
5426static void
5427bnx2_free_tx_skbs(struct bnx2 *bp)
5428{
5429	int i;
5430
5431	for (i = 0; i < bp->num_tx_rings; i++) {
5432		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5433		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5434		int j;
5435
5436		if (!txr->tx_buf_ring)
5437			continue;
5438
5439		for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5440			struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5441			struct sk_buff *skb = tx_buf->skb;
5442			int k, last;
5443
5444			if (!skb) {
5445				j = BNX2_NEXT_TX_BD(j);
5446				continue;
5447			}
5448
5449			dma_unmap_single(&bp->pdev->dev,
5450					 dma_unmap_addr(tx_buf, mapping),
5451					 skb_headlen(skb),
5452					 DMA_TO_DEVICE);
5453
5454			tx_buf->skb = NULL;
5455
5456			last = tx_buf->nr_frags;
5457			j = BNX2_NEXT_TX_BD(j);
5458			for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5459				tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5460				dma_unmap_page(&bp->pdev->dev,
5461					dma_unmap_addr(tx_buf, mapping),
5462					skb_frag_size(&skb_shinfo(skb)->frags[k]),
5463					DMA_TO_DEVICE);
5464			}
5465			dev_kfree_skb(skb);
5466		}
5467		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5468	}
5469}
5470
5471static void
5472bnx2_free_rx_skbs(struct bnx2 *bp)
5473{
5474	int i;
5475
5476	for (i = 0; i < bp->num_rx_rings; i++) {
5477		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5478		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5479		int j;
5480
5481		if (!rxr->rx_buf_ring)
5482			return;
5483
5484		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5485			struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5486			u8 *data = rx_buf->data;
5487
5488			if (!data)
5489				continue;
5490
5491			dma_unmap_single(&bp->pdev->dev,
5492					 dma_unmap_addr(rx_buf, mapping),
5493					 bp->rx_buf_use_size,
5494					 DMA_FROM_DEVICE);
5495
5496			rx_buf->data = NULL;
5497
5498			kfree(data);
5499		}
5500		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5501			bnx2_free_rx_page(bp, rxr, j);
5502	}
5503}
5504
5505static void
5506bnx2_free_skbs(struct bnx2 *bp)
5507{
5508	bnx2_free_tx_skbs(bp);
5509	bnx2_free_rx_skbs(bp);
5510}
5511
5512static int
5513bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5514{
5515	int rc;
5516
5517	rc = bnx2_reset_chip(bp, reset_code);
5518	bnx2_free_skbs(bp);
5519	if (rc)
5520		return rc;
5521
5522	if ((rc = bnx2_init_chip(bp)) != 0)
5523		return rc;
5524
5525	bnx2_init_all_rings(bp);
5526	return 0;
5527}
5528
5529static int
5530bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5531{
5532	int rc;
5533
5534	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5535		return rc;
5536
5537	spin_lock_bh(&bp->phy_lock);
5538	bnx2_init_phy(bp, reset_phy);
5539	bnx2_set_link(bp);
5540	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5541		bnx2_remote_phy_event(bp);
5542	spin_unlock_bh(&bp->phy_lock);
5543	return 0;
5544}
5545
5546static int
5547bnx2_shutdown_chip(struct bnx2 *bp)
5548{
5549	u32 reset_code;
5550
5551	if (bp->flags & BNX2_FLAG_NO_WOL)
5552		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5553	else if (bp->wol)
5554		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5555	else
5556		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5557
5558	return bnx2_reset_chip(bp, reset_code);
5559}
5560
5561static int
5562bnx2_test_registers(struct bnx2 *bp)
5563{
5564	int ret;
5565	int i, is_5709;
5566	static const struct {
5567		u16   offset;
5568		u16   flags;
5569#define BNX2_FL_NOT_5709	1
5570		u32   rw_mask;
5571		u32   ro_mask;
5572	} reg_tbl[] = {
5573		{ 0x006c, 0, 0x00000000, 0x0000003f },
5574		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5575		{ 0x0094, 0, 0x00000000, 0x00000000 },
5576
5577		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5578		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5579		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5580		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5581		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5582		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5583		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5584		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5585		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5586
5587		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5588		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5589		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5590		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5591		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5592		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5593
5594		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5595		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5596		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5597
5598		{ 0x1000, 0, 0x00000000, 0x00000001 },
5599		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5600
5601		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5602		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5603		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5604		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5605		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5606		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5607		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5608		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5609		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5610		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5611
5612		{ 0x1800, 0, 0x00000000, 0x00000001 },
5613		{ 0x1804, 0, 0x00000000, 0x00000003 },
5614
5615		{ 0x2800, 0, 0x00000000, 0x00000001 },
5616		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5617		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5618		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5619		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5620		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5621		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5622		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5623		{ 0x2840, 0, 0x00000000, 0xffffffff },
5624		{ 0x2844, 0, 0x00000000, 0xffffffff },
5625		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5626		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5627
5628		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5629		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5630
5631		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5632		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5633		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5634		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5635		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5636		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5637		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5638		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5639		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5640
5641		{ 0x5004, 0, 0x00000000, 0x0000007f },
5642		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5643
5644		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5645		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5646		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5647		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5648		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5649		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5650		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5651		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5652		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5653
5654		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5655		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5656		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5657		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5658		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5659		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5660		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5661		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5662		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5663		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5664		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5665		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5666		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5667		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5668		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5669		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5670		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5671		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5672		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5673		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5674		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5675		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5676		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5677
5678		{ 0xffff, 0, 0x00000000, 0x00000000 },
5679	};
5680
5681	ret = 0;
5682	is_5709 = 0;
5683	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5684		is_5709 = 1;
5685
5686	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5687		u32 offset, rw_mask, ro_mask, save_val, val;
5688		u16 flags = reg_tbl[i].flags;
5689
5690		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5691			continue;
5692
5693		offset = (u32) reg_tbl[i].offset;
5694		rw_mask = reg_tbl[i].rw_mask;
5695		ro_mask = reg_tbl[i].ro_mask;
5696
5697		save_val = readl(bp->regview + offset);
5698
5699		writel(0, bp->regview + offset);
5700
5701		val = readl(bp->regview + offset);
5702		if ((val & rw_mask) != 0) {
5703			goto reg_test_err;
5704		}
5705
5706		if ((val & ro_mask) != (save_val & ro_mask)) {
5707			goto reg_test_err;
5708		}
5709
5710		writel(0xffffffff, bp->regview + offset);
5711
5712		val = readl(bp->regview + offset);
5713		if ((val & rw_mask) != rw_mask) {
5714			goto reg_test_err;
5715		}
5716
5717		if ((val & ro_mask) != (save_val & ro_mask)) {
5718			goto reg_test_err;
5719		}
5720
5721		writel(save_val, bp->regview + offset);
5722		continue;
5723
5724reg_test_err:
5725		writel(save_val, bp->regview + offset);
5726		ret = -ENODEV;
5727		break;
5728	}
5729	return ret;
5730}
5731
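/* Editor's illustration (not part of the driver): the walking 0/1 test
 * above asserts that read-write bits follow what was written while
 * read-only bits keep their saved value.  A minimal sketch of that
 * predicate, assuming the same mask semantics:
 */
static inline bool bnx2_demo_reg_ok(u32 val, u32 save_val, u32 wrote,
				    u32 rw_mask, u32 ro_mask)
{
	return (val & rw_mask) == (wrote & rw_mask) &&
	       (val & ro_mask) == (save_val & ro_mask);
}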
5732static int
5733bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5734{
5735	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5736		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5737	int i;
5738
5739	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5740		u32 offset;
5741
5742		for (offset = 0; offset < size; offset += 4) {
5743
5744			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5745
5746			if (bnx2_reg_rd_ind(bp, start + offset) !=
5747				test_pattern[i]) {
5748				return -ENODEV;
5749			}
5750		}
5751	}
5752	return 0;
5753}
5754
5755static int
5756bnx2_test_memory(struct bnx2 *bp)
5757{
5758	int ret = 0;
5759	int i;
5760	static struct mem_entry {
5761		u32   offset;
5762		u32   len;
5763	} mem_tbl_5706[] = {
5764		{ 0x60000,  0x4000 },
5765		{ 0xa0000,  0x3000 },
5766		{ 0xe0000,  0x4000 },
5767		{ 0x120000, 0x4000 },
5768		{ 0x1a0000, 0x4000 },
5769		{ 0x160000, 0x4000 },
5770		{ 0xffffffff, 0    },
5771	},
5772	mem_tbl_5709[] = {
5773		{ 0x60000,  0x4000 },
5774		{ 0xa0000,  0x3000 },
5775		{ 0xe0000,  0x4000 },
5776		{ 0x120000, 0x4000 },
5777		{ 0x1a0000, 0x4000 },
5778		{ 0xffffffff, 0    },
5779	};
5780	struct mem_entry *mem_tbl;
5781
5782	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5783		mem_tbl = mem_tbl_5709;
5784	else
5785		mem_tbl = mem_tbl_5706;
5786
5787	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5788		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5789			mem_tbl[i].len)) != 0) {
5790			return ret;
5791		}
5792	}
5793
5794	return ret;
5795}
5796
5797#define BNX2_MAC_LOOPBACK	0
5798#define BNX2_PHY_LOOPBACK	1
5799
5800static int
5801bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5802{
5803	unsigned int pkt_size, num_pkts, i;
5804	struct sk_buff *skb;
5805	u8 *data;
5806	unsigned char *packet;
5807	u16 rx_start_idx, rx_idx;
5808	dma_addr_t map;
5809	struct bnx2_tx_bd *txbd;
5810	struct bnx2_sw_bd *rx_buf;
5811	struct l2_fhdr *rx_hdr;
5812	int ret = -ENODEV;
5813	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5814	struct bnx2_tx_ring_info *txr;
5815	struct bnx2_rx_ring_info *rxr;
5816
5817	tx_napi = bnapi;
5818
5819	txr = &tx_napi->tx_ring;
5820	rxr = &bnapi->rx_ring;
5821	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5822		bp->loopback = MAC_LOOPBACK;
5823		bnx2_set_mac_loopback(bp);
5824	}
5825	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5826		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5827			return 0;
5828
5829		bp->loopback = PHY_LOOPBACK;
5830		bnx2_set_phy_loopback(bp);
5831	}
5832	else
5833		return -EINVAL;
5834
5835	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5836	skb = netdev_alloc_skb(bp->dev, pkt_size);
5837	if (!skb)
5838		return -ENOMEM;
5839	packet = skb_put(skb, pkt_size);
5840	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5841	memset(packet + ETH_ALEN, 0x0, 8);
5842	for (i = 14; i < pkt_size; i++)
5843		packet[i] = (unsigned char) (i & 0xff);
5844
5845	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5846			     DMA_TO_DEVICE);
5847	if (dma_mapping_error(&bp->pdev->dev, map)) {
5848		dev_kfree_skb(skb);
5849		return -EIO;
5850	}
5851
5852	BNX2_WR(bp, BNX2_HC_COMMAND,
5853		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5854
5855	BNX2_RD(bp, BNX2_HC_COMMAND);
5856
5857	udelay(5);
5858	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5859
5860	num_pkts = 0;
5861
5862	txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5863
5864	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5865	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5866	txbd->tx_bd_mss_nbytes = pkt_size;
5867	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5868
5869	num_pkts++;
5870	txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5871	txr->tx_prod_bseq += pkt_size;
5872
5873	BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5874	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5875
5876	udelay(100);
5877
5878	BNX2_WR(bp, BNX2_HC_COMMAND,
5879		bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5880
5881	BNX2_RD(bp, BNX2_HC_COMMAND);
5882
5883	udelay(5);
5884
5885	dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5886	dev_kfree_skb(skb);
5887
5888	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5889		goto loopback_test_done;
5890
5891	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5892	if (rx_idx != rx_start_idx + num_pkts) {
5893		goto loopback_test_done;
5894	}
5895
5896	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5897	data = rx_buf->data;
5898
5899	rx_hdr = get_l2_fhdr(data);
5900	data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
5901
5902	dma_sync_single_for_cpu(&bp->pdev->dev,
5903		dma_unmap_addr(rx_buf, mapping),
5904		bp->rx_buf_use_size, DMA_FROM_DEVICE);
5905
5906	if (rx_hdr->l2_fhdr_status &
5907		(L2_FHDR_ERRORS_BAD_CRC |
5908		L2_FHDR_ERRORS_PHY_DECODE |
5909		L2_FHDR_ERRORS_ALIGNMENT |
5910		L2_FHDR_ERRORS_TOO_SHORT |
5911		L2_FHDR_ERRORS_GIANT_FRAME)) {
5912
5913		goto loopback_test_done;
5914	}
5915
5916	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5917		goto loopback_test_done;
5918	}
5919
5920	for (i = 14; i < pkt_size; i++) {
5921		if (*(data + i) != (unsigned char) (i & 0xff)) {
5922			goto loopback_test_done;
5923		}
5924	}
5925
5926	ret = 0;
5927
5928loopback_test_done:
5929	bp->loopback = 0;
5930	return ret;
5931}
5932
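/* Editor's illustration (not part of the driver): the loopback frame
 * built above carries the device MAC, a zeroed field, and then a
 * deterministic byte ramp that the receive side re-checks byte for byte.
 * A minimal sketch of the payload fill, assuming the 14-byte Ethernet
 * header used in the code above:
 */
static inline void bnx2_demo_fill_pattern(u8 *packet, unsigned int pkt_size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (u8) (i & 0xff);
}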
5933#define BNX2_MAC_LOOPBACK_FAILED	1
5934#define BNX2_PHY_LOOPBACK_FAILED	2
5935#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5936					 BNX2_PHY_LOOPBACK_FAILED)
5937
5938static int
5939bnx2_test_loopback(struct bnx2 *bp)
5940{
5941	int rc = 0;
5942
5943	if (!netif_running(bp->dev))
5944		return BNX2_LOOPBACK_FAILED;
5945
5946	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5947	spin_lock_bh(&bp->phy_lock);
5948	bnx2_init_phy(bp, 1);
5949	spin_unlock_bh(&bp->phy_lock);
5950	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5951		rc |= BNX2_MAC_LOOPBACK_FAILED;
5952	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5953		rc |= BNX2_PHY_LOOPBACK_FAILED;
5954	return rc;
5955}
5956
5957#define NVRAM_SIZE 0x200
5958#define CRC32_RESIDUAL 0xdebb20e3
5959
5960static int
5961bnx2_test_nvram(struct bnx2 *bp)
5962{
5963	__be32 buf[NVRAM_SIZE / 4];
5964	u8 *data = (u8 *) buf;
5965	int rc = 0;
5966	u32 magic, csum;
5967
5968	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5969		goto test_nvram_done;
5970
5971	magic = be32_to_cpu(buf[0]);
5972	if (magic != 0x669955aa) {
5973		rc = -ENODEV;
5974		goto test_nvram_done;
5975	}
5976
5977	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5978		goto test_nvram_done;
5979
5980	csum = ether_crc_le(0x100, data);
5981	if (csum != CRC32_RESIDUAL) {
5982		rc = -ENODEV;
5983		goto test_nvram_done;
5984	}
5985
5986	csum = ether_crc_le(0x100, data + 0x100);
5987	if (csum != CRC32_RESIDUAL) {
5988		rc = -ENODEV;
5989	}
5990
5991test_nvram_done:
5992	return rc;
5993}
5994
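/* Editor's note: both checks above rely on the standard CRC32 property
 * that running the CRC over a block together with its appended
 * little-endian CRC yields the fixed residual 0xdebb20e3.  A minimal
 * sketch of that check:
 */
static inline bool bnx2_demo_crc_block_ok(const u8 *data, int len)
{
	return ether_crc_le(len, data) == CRC32_RESIDUAL;
}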
5995static int
5996bnx2_test_link(struct bnx2 *bp)
5997{
5998	u32 bmsr;
5999
6000	if (!netif_running(bp->dev))
6001		return -ENODEV;
6002
6003	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6004		if (bp->link_up)
6005			return 0;
6006		return -ENODEV;
6007	}
6008	spin_lock_bh(&bp->phy_lock);
6009	bnx2_enable_bmsr1(bp);
6010	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6011	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
6012	bnx2_disable_bmsr1(bp);
6013	spin_unlock_bh(&bp->phy_lock);
6014
6015	if (bmsr & BMSR_LSTATUS) {
6016		return 0;
6017	}
6018	return -ENODEV;
6019}
6020
6021static int
6022bnx2_test_intr(struct bnx2 *bp)
6023{
6024	int i;
6025	u16 status_idx;
6026
6027	if (!netif_running(bp->dev))
6028		return -ENODEV;
6029
6030	status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6031
6032	/* This register is not touched during run-time. */
6033	BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6034	BNX2_RD(bp, BNX2_HC_COMMAND);
6035
6036	for (i = 0; i < 10; i++) {
6037		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
6038			status_idx) {
6039
6040			break;
6041		}
6042
6043		msleep_interruptible(10);
6044	}
6045	if (i < 10)
6046		return 0;
6047
6048	return -ENODEV;
6049}
6050
6051/* Determine link state for parallel detection. */
6052static int
6053bnx2_5706_serdes_has_link(struct bnx2 *bp)
6054{
6055	u32 mode_ctl, an_dbg, exp;
6056
6057	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6058		return 0;
6059
6060	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6061	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6062
6063	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
6064		return 0;
6065
6066	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6067	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6068	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6069
6070	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
6071		return 0;
6072
6073	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6074	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6075	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6076
6077	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
6078		return 0;
6079
6080	return 1;
6081}
6082
6083static void
6084bnx2_5706_serdes_timer(struct bnx2 *bp)
6085{
6086	int check_link = 1;
6087
6088	spin_lock(&bp->phy_lock);
6089	if (bp->serdes_an_pending) {
6090		bp->serdes_an_pending--;
6091		check_link = 0;
6092	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6093		u32 bmcr;
6094
6095		bp->current_interval = BNX2_TIMER_INTERVAL;
6096
6097		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6098
6099		if (bmcr & BMCR_ANENABLE) {
6100			if (bnx2_5706_serdes_has_link(bp)) {
6101				bmcr &= ~BMCR_ANENABLE;
6102				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6103				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6104				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6105			}
6106		}
6107	}
6108	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6109		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6110		u32 phy2;
6111
6112		bnx2_write_phy(bp, 0x17, 0x0f01);
6113		bnx2_read_phy(bp, 0x15, &phy2);
6114		if (phy2 & 0x20) {
6115			u32 bmcr;
6116
6117			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6118			bmcr |= BMCR_ANENABLE;
6119			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6120
6121			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6122		}
6123	} else
6124		bp->current_interval = BNX2_TIMER_INTERVAL;
6125
6126	if (check_link) {
6127		u32 val;
6128
6129		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6130		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6131		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6132
6133		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6134			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6135				bnx2_5706s_force_link_dn(bp, 1);
6136				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6137			} else
6138				bnx2_set_link(bp);
6139		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6140			bnx2_set_link(bp);
6141	}
6142	spin_unlock(&bp->phy_lock);
6143}
6144
6145static void
6146bnx2_5708_serdes_timer(struct bnx2 *bp)
6147{
6148	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6149		return;
6150
6151	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6152		bp->serdes_an_pending = 0;
6153		return;
6154	}
6155
6156	spin_lock(&bp->phy_lock);
6157	if (bp->serdes_an_pending)
6158		bp->serdes_an_pending--;
6159	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6160		u32 bmcr;
6161
6162		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6163		if (bmcr & BMCR_ANENABLE) {
6164			bnx2_enable_forced_2g5(bp);
6165			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6166		} else {
6167			bnx2_disable_forced_2g5(bp);
6168			bp->serdes_an_pending = 2;
6169			bp->current_interval = BNX2_TIMER_INTERVAL;
6170		}
6171
6172	} else
6173		bp->current_interval = BNX2_TIMER_INTERVAL;
6174
6175	spin_unlock(&bp->phy_lock);
6176}
6177
6178static void
6179bnx2_timer(struct timer_list *t)
6180{
6181	struct bnx2 *bp = from_timer(bp, t, timer);
6182
6183	if (!netif_running(bp->dev))
6184		return;
6185
6186	if (atomic_read(&bp->intr_sem) != 0)
6187		goto bnx2_restart_timer;
6188
6189	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6190	     BNX2_FLAG_USING_MSI)
6191		bnx2_chk_missed_msi(bp);
6192
6193	bnx2_send_heart_beat(bp);
6194
6195	bp->stats_blk->stat_FwRxDrop =
6196		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6197
6198	/* work around occasionally corrupted counters */
6199	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6200		BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6201			BNX2_HC_COMMAND_STATS_NOW);
6202
6203	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6204		if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6205			bnx2_5706_serdes_timer(bp);
6206		else
6207			bnx2_5708_serdes_timer(bp);
6208	}
6209
6210bnx2_restart_timer:
6211	mod_timer(&bp->timer, jiffies + bp->current_interval);
6212}
6213
6214static int
6215bnx2_request_irq(struct bnx2 *bp)
6216{
6217	unsigned long flags;
6218	struct bnx2_irq *irq;
6219	int rc = 0, i;
6220
6221	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6222		flags = 0;
6223	else
6224		flags = IRQF_SHARED;
6225
6226	for (i = 0; i < bp->irq_nvecs; i++) {
6227		irq = &bp->irq_tbl[i];
6228		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6229				 &bp->bnx2_napi[i]);
6230		if (rc)
6231			break;
6232		irq->requested = 1;
6233	}
6234	return rc;
6235}
6236
6237static void
6238__bnx2_free_irq(struct bnx2 *bp)
6239{
6240	struct bnx2_irq *irq;
6241	int i;
6242
6243	for (i = 0; i < bp->irq_nvecs; i++) {
6244		irq = &bp->irq_tbl[i];
6245		if (irq->requested)
6246			free_irq(irq->vector, &bp->bnx2_napi[i]);
6247		irq->requested = 0;
6248	}
6249}
6250
6251static void
6252bnx2_free_irq(struct bnx2 *bp)
6253{
6254
6255	__bnx2_free_irq(bp);
6256	if (bp->flags & BNX2_FLAG_USING_MSI)
6257		pci_disable_msi(bp->pdev);
6258	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6259		pci_disable_msix(bp->pdev);
6260
6261	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6262}
6263
6264static void
6265bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6266{
6267	int i, total_vecs;
6268	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6269	struct net_device *dev = bp->dev;
6270	const int len = sizeof(bp->irq_tbl[0].name);
6271
6272	bnx2_setup_msix_tbl(bp);
6273	BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6274	BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6275	BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6276
6277	/*  Need to flush the previous three writes to ensure MSI-X
6278	 *  is set up properly */
6279	BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6280
6281	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6282		msix_ent[i].entry = i;
6283		msix_ent[i].vector = 0;
6284	}
6285
6286	total_vecs = msix_vecs;
6287#ifdef BCM_CNIC
6288	total_vecs++;
6289#endif
6290	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6291					   BNX2_MIN_MSIX_VEC, total_vecs);
6292	if (total_vecs < 0)
6293		return;
6294
6295	msix_vecs = total_vecs;
6296#ifdef BCM_CNIC
6297	msix_vecs--;
6298#endif
6299	bp->irq_nvecs = msix_vecs;
6300	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6301	for (i = 0; i < total_vecs; i++) {
6302		bp->irq_tbl[i].vector = msix_ent[i].vector;
6303		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6304		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6305	}
6306}
6307
6308static int
6309bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6310{
6311	int cpus = netif_get_num_default_rss_queues();
6312	int msix_vecs;
6313
6314	if (!bp->num_req_rx_rings)
6315		msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6316	else if (!bp->num_req_tx_rings)
6317		msix_vecs = max(cpus, bp->num_req_rx_rings);
6318	else
6319		msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6320
6321	msix_vecs = min(msix_vecs, RX_MAX_RINGS);
6322
6323	bp->irq_tbl[0].handler = bnx2_interrupt;
6324	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6325	bp->irq_nvecs = 1;
6326	bp->irq_tbl[0].vector = bp->pdev->irq;
6327
6328	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6329		bnx2_enable_msix(bp, msix_vecs);
6330
6331	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6332	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6333		if (pci_enable_msi(bp->pdev) == 0) {
6334			bp->flags |= BNX2_FLAG_USING_MSI;
6335			if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6336				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6337				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6338			} else
6339				bp->irq_tbl[0].handler = bnx2_msi;
6340
6341			bp->irq_tbl[0].vector = bp->pdev->irq;
6342		}
6343	}
6344
6345	if (!bp->num_req_tx_rings)
6346		bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6347	else
6348		bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6349
6350	if (!bp->num_req_rx_rings)
6351		bp->num_rx_rings = bp->irq_nvecs;
6352	else
6353		bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6354
6355	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6356
6357	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6358}
6359
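/* Editor's illustration (not part of the driver): with no explicit user
 * request, the tx ring count above is derived from the granted vector
 * count rounded down to a power of two; a user request is simply clamped
 * to the vectors available.  A minimal sketch of that choice:
 */
static inline int bnx2_demo_tx_rings(int irq_nvecs, int num_req_tx_rings)
{
	return num_req_tx_rings ? min(irq_nvecs, num_req_tx_rings) :
				  (int) rounddown_pow_of_two(irq_nvecs);
}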
6360/* Called with rtnl_lock */
6361static int
6362bnx2_open(struct net_device *dev)
6363{
6364	struct bnx2 *bp = netdev_priv(dev);
6365	int rc;
6366
6367	rc = bnx2_request_firmware(bp);
6368	if (rc < 0)
6369		goto out;
6370
6371	netif_carrier_off(dev);
6372
6373	bnx2_disable_int(bp);
6374
6375	rc = bnx2_setup_int_mode(bp, disable_msi);
6376	if (rc)
6377		goto open_err;
6378	bnx2_init_napi(bp);
6379	bnx2_napi_enable(bp);
6380	rc = bnx2_alloc_mem(bp);
6381	if (rc)
6382		goto open_err;
6383
6384	rc = bnx2_request_irq(bp);
6385	if (rc)
6386		goto open_err;
6387
6388	rc = bnx2_init_nic(bp, 1);
6389	if (rc)
6390		goto open_err;
6391
6392	mod_timer(&bp->timer, jiffies + bp->current_interval);
6393
6394	atomic_set(&bp->intr_sem, 0);
6395
6396	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6397
6398	bnx2_enable_int(bp);
6399
6400	if (bp->flags & BNX2_FLAG_USING_MSI) {
6401		/* Test MSI to make sure it is working
6402		 * If MSI test fails, go back to INTx mode
6403		 */
6404		if (bnx2_test_intr(bp) != 0) {
6405			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6406
6407			bnx2_disable_int(bp);
6408			bnx2_free_irq(bp);
6409
6410			bnx2_setup_int_mode(bp, 1);
6411
6412			rc = bnx2_init_nic(bp, 0);
6413
6414			if (!rc)
6415				rc = bnx2_request_irq(bp);
6416
6417			if (rc) {
6418				del_timer_sync(&bp->timer);
6419				goto open_err;
6420			}
6421			bnx2_enable_int(bp);
6422		}
6423	}
6424	if (bp->flags & BNX2_FLAG_USING_MSI)
6425		netdev_info(dev, "using MSI\n");
6426	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6427		netdev_info(dev, "using MSIX\n");
6428
6429	netif_tx_start_all_queues(dev);
6430out:
6431	return rc;
6432
6433open_err:
6434	bnx2_napi_disable(bp);
6435	bnx2_free_skbs(bp);
6436	bnx2_free_irq(bp);
6437	bnx2_free_mem(bp);
6438	bnx2_del_napi(bp);
6439	bnx2_release_firmware(bp);
6440	goto out;
6441}
6442
6443static void
6444bnx2_reset_task(struct work_struct *work)
6445{
6446	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6447	int rc;
6448	u16 pcicmd;
6449
6450	rtnl_lock();
6451	if (!netif_running(bp->dev)) {
6452		rtnl_unlock();
6453		return;
6454	}
6455
6456	bnx2_netif_stop(bp, true);
6457
6458	pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6459	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
6460		/* in case PCI block has reset */
6461		pci_restore_state(bp->pdev);
6462		pci_save_state(bp->pdev);
6463	}
6464	rc = bnx2_init_nic(bp, 1);
6465	if (rc) {
6466		netdev_err(bp->dev, "failed to reset NIC, closing\n");
6467		bnx2_napi_enable(bp);
6468		dev_close(bp->dev);
6469		rtnl_unlock();
6470		return;
6471	}
6472
6473	atomic_set(&bp->intr_sem, 1);
6474	bnx2_netif_start(bp, true);
6475	rtnl_unlock();
6476}
6477
6478#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
6479
6480static void
6481bnx2_dump_ftq(struct bnx2 *bp)
6482{
6483	int i;
6484	u32 reg, bdidx, cid, valid;
6485	struct net_device *dev = bp->dev;
6486	static const struct ftq_reg {
6487		char *name;
6488		u32 off;
6489	} ftq_arr[] = {
6490		BNX2_FTQ_ENTRY(RV2P_P),
6491		BNX2_FTQ_ENTRY(RV2P_T),
6492		BNX2_FTQ_ENTRY(RV2P_M),
6493		BNX2_FTQ_ENTRY(TBDR_),
6494		BNX2_FTQ_ENTRY(TDMA_),
6495		BNX2_FTQ_ENTRY(TXP_),
6496		BNX2_FTQ_ENTRY(TXP_),
6497		BNX2_FTQ_ENTRY(TPAT_),
6498		BNX2_FTQ_ENTRY(RXP_C),
6499		BNX2_FTQ_ENTRY(RXP_),
6500		BNX2_FTQ_ENTRY(COM_COMXQ_),
6501		BNX2_FTQ_ENTRY(COM_COMTQ_),
6502		BNX2_FTQ_ENTRY(COM_COMQ_),
6503		BNX2_FTQ_ENTRY(CP_CPQ_),
6504	};
6505
6506	netdev_err(dev, "<--- start FTQ dump --->\n");
6507	for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
6508		netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
6509			   bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6510
6511	netdev_err(dev, "CPU states:\n");
6512	for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
6513		netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
6514			   reg, bnx2_reg_rd_ind(bp, reg),
6515			   bnx2_reg_rd_ind(bp, reg + 4),
6516			   bnx2_reg_rd_ind(bp, reg + 8),
6517			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6518			   bnx2_reg_rd_ind(bp, reg + 0x1c),
6519			   bnx2_reg_rd_ind(bp, reg + 0x20));
6520
6521	netdev_err(dev, "<--- end FTQ dump --->\n");
6522	netdev_err(dev, "<--- start TBDC dump --->\n");
6523	netdev_err(dev, "TBDC free cnt: %ld\n",
6524		   BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6525	netdev_err(dev, "LINE     CID  BIDX   CMD  VALIDS\n");
6526	for (i = 0; i < 0x20; i++) {
6527		int j = 0;
6528
6529		BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6530		BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6531			BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6532		BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6533		while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6534			BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6535			j++;
6536
6537		cid = BNX2_RD(bp, BNX2_TBDC_CID);
6538		bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6539		valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6540		netdev_err(dev, "%02x    %06x  %04lx   %02x    [%x]\n",
6541			   i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6542			   bdidx >> 24, (valid >> 8) & 0x0ff);
6543	}
6544	netdev_err(dev, "<--- end TBDC dump --->\n");
6545}
6546
6547static void
6548bnx2_dump_state(struct bnx2 *bp)
6549{
6550	struct net_device *dev = bp->dev;
6551	u32 val1, val2;
6552
6553	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6554	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6555		   atomic_read(&bp->intr_sem), val1);
6556	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6557	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6558	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6559	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6560		   BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6561		   BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6562	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6563		   BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6564	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6565		   BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6566	if (bp->flags & BNX2_FLAG_USING_MSIX)
6567		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6568			   BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6569}
6570
6571static void
6572bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue)
6573{
6574	struct bnx2 *bp = netdev_priv(dev);
6575
6576	bnx2_dump_ftq(bp);
6577	bnx2_dump_state(bp);
6578	bnx2_dump_mcp_state(bp);
6579
6580	/* This allows the netif to be shut down gracefully before resetting */
6581	schedule_work(&bp->reset_task);
6582}
6583
6584/* Called with netif_tx_lock.
6585 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6586 * netif_wake_queue().
6587 */
6588static netdev_tx_t
6589bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6590{
6591	struct bnx2 *bp = netdev_priv(dev);
6592	dma_addr_t mapping;
6593	struct bnx2_tx_bd *txbd;
6594	struct bnx2_sw_tx_bd *tx_buf;
6595	u32 len, vlan_tag_flags, last_frag, mss;
6596	u16 prod, ring_prod;
6597	int i;
6598	struct bnx2_napi *bnapi;
6599	struct bnx2_tx_ring_info *txr;
6600	struct netdev_queue *txq;
6601
6602	/*  Determine which tx ring we will be placed on */
6603	i = skb_get_queue_mapping(skb);
6604	bnapi = &bp->bnx2_napi[i];
6605	txr = &bnapi->tx_ring;
6606	txq = netdev_get_tx_queue(dev, i);
6607
6608	if (unlikely(bnx2_tx_avail(bp, txr) <
6609	    (skb_shinfo(skb)->nr_frags + 1))) {
6610		netif_tx_stop_queue(txq);
6611		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6612
6613		return NETDEV_TX_BUSY;
6614	}
6615	len = skb_headlen(skb);
6616	prod = txr->tx_prod;
6617	ring_prod = BNX2_TX_RING_IDX(prod);
6618
6619	vlan_tag_flags = 0;
6620	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6621		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6622	}
6623
6624	if (skb_vlan_tag_present(skb)) {
6625		vlan_tag_flags |=
6626			(TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16));
6627	}
6628
6629	if ((mss = skb_shinfo(skb)->gso_size)) {
6630		u32 tcp_opt_len;
6631		struct iphdr *iph;
6632
6633		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6634
6635		tcp_opt_len = tcp_optlen(skb);
6636
6637		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6638			u32 tcp_off = skb_transport_offset(skb) -
6639				      sizeof(struct ipv6hdr) - ETH_HLEN;
6640
6641			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6642					  TX_BD_FLAGS_SW_FLAGS;
6643			if (likely(tcp_off == 0))
6644				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6645			else {
6646				tcp_off >>= 3;
6647				vlan_tag_flags |= ((tcp_off & 0x3) <<
6648						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6649						  ((tcp_off & 0x10) <<
6650						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6651				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6652			}
6653		} else {
6654			iph = ip_hdr(skb);
6655			if (tcp_opt_len || (iph->ihl > 5)) {
6656				vlan_tag_flags |= ((iph->ihl - 5) +
6657						   (tcp_opt_len >> 2)) << 8;
6658			}
6659		}
6660	} else
6661		mss = 0;
6662
6663	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
6664	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6665		dev_kfree_skb_any(skb);
6666		return NETDEV_TX_OK;
6667	}
6668
6669	tx_buf = &txr->tx_buf_ring[ring_prod];
6670	tx_buf->skb = skb;
6671	dma_unmap_addr_set(tx_buf, mapping, mapping);
6672
6673	txbd = &txr->tx_desc_ring[ring_prod];
6674
6675	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6676	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6677	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6678	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6679
6680	last_frag = skb_shinfo(skb)->nr_frags;
6681	tx_buf->nr_frags = last_frag;
6682	tx_buf->is_gso = skb_is_gso(skb);
6683
6684	for (i = 0; i < last_frag; i++) {
6685		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6686
6687		prod = BNX2_NEXT_TX_BD(prod);
6688		ring_prod = BNX2_TX_RING_IDX(prod);
6689		txbd = &txr->tx_desc_ring[ring_prod];
6690
6691		len = skb_frag_size(frag);
6692		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6693					   DMA_TO_DEVICE);
6694		if (dma_mapping_error(&bp->pdev->dev, mapping))
6695			goto dma_error;
6696		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6697				   mapping);
6698
6699		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6700		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6701		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6702		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6703
6704	}
6705	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6706
6707	/* Sync BD data before updating TX mailbox */
6708	wmb();
6709
6710	netdev_tx_sent_queue(txq, skb->len);
6711
6712	prod = BNX2_NEXT_TX_BD(prod);
6713	txr->tx_prod_bseq += skb->len;
6714
6715	BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6716	BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6717
6718	txr->tx_prod = prod;
6719
6720	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6721		netif_tx_stop_queue(txq);
6722
6723		/* netif_tx_stop_queue() must be done before checking
6724		 * tx index in bnx2_tx_avail() below, because in
6725		 * bnx2_tx_int(), we update tx index before checking for
6726		 * netif_tx_queue_stopped().
6727		 */
6728		smp_mb();
6729		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6730			netif_tx_wake_queue(txq);
6731	}
6732
6733	return NETDEV_TX_OK;
6734dma_error:
6735	/* save value of frag that failed */
6736	last_frag = i;
6737
6738	/* start back at beginning and unmap skb */
6739	prod = txr->tx_prod;
6740	ring_prod = BNX2_TX_RING_IDX(prod);
6741	tx_buf = &txr->tx_buf_ring[ring_prod];
6742	tx_buf->skb = NULL;
6743	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6744			 skb_headlen(skb), DMA_TO_DEVICE);
6745
6746	/* unmap remaining mapped pages */
6747	for (i = 0; i < last_frag; i++) {
6748		prod = BNX2_NEXT_TX_BD(prod);
6749		ring_prod = BNX2_TX_RING_IDX(prod);
6750		tx_buf = &txr->tx_buf_ring[ring_prod];
6751		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6752			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6753			       DMA_TO_DEVICE);
6754	}
6755
6756	dev_kfree_skb_any(skb);
6757	return NETDEV_TX_OK;
6758}
6759
6760/* Called with rtnl_lock */
6761static int
6762bnx2_close(struct net_device *dev)
6763{
6764	struct bnx2 *bp = netdev_priv(dev);
6765
6766	bnx2_disable_int_sync(bp);
6767	bnx2_napi_disable(bp);
6768	netif_tx_disable(dev);
6769	del_timer_sync(&bp->timer);
6770	bnx2_shutdown_chip(bp);
6771	bnx2_free_irq(bp);
6772	bnx2_free_skbs(bp);
6773	bnx2_free_mem(bp);
6774	bnx2_del_napi(bp);
6775	bp->link_up = 0;
6776	netif_carrier_off(bp->dev);
6777	return 0;
6778}
6779
6780static void
6781bnx2_save_stats(struct bnx2 *bp)
6782{
6783	u32 *hw_stats = (u32 *) bp->stats_blk;
6784	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6785	int i;
6786
6787	/* The 1st 10 counters are 64-bit counters */
6788	for (i = 0; i < 20; i += 2) {
6789		u32 hi;
6790		u64 lo;
6791
6792		hi = temp_stats[i] + hw_stats[i];
6793		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6794		if (lo > 0xffffffff)
6795			hi++;
6796		temp_stats[i] = hi;
6797		temp_stats[i + 1] = lo & 0xffffffff;
6798	}
6799
6800	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6801		temp_stats[i] += hw_stats[i];
6802}
6803
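/* Editor's illustration (not part of the driver): the loop above adds
 * split hi:lo counter pairs and propagates the 32-bit carry by hand.  A
 * minimal sketch of one such addition:
 */
static inline void bnx2_demo_add_hi_lo(u32 *hi, u32 *lo, u32 add_hi,
				       u32 add_lo)
{
	u64 sum = (u64) *lo + add_lo;

	*hi += add_hi + (sum > 0xffffffff);	/* carry out of the low word */
	*lo = (u32) sum;
}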
6804#define GET_64BIT_NET_STATS64(ctr)		\
6805	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6806
6807#define GET_64BIT_NET_STATS(ctr)				\
6808	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6809	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6810
6811#define GET_32BIT_NET_STATS(ctr)				\
6812	(unsigned long) (bp->stats_blk->ctr +			\
6813			 bp->temp_stats_blk->ctr)
6814
6815static void
6816bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6817{
6818	struct bnx2 *bp = netdev_priv(dev);
6819
6820	if (!bp->stats_blk)
6821		return;
6822
6823	net_stats->rx_packets =
6824		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6825		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6826		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6827
6828	net_stats->tx_packets =
6829		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6830		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6831		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6832
6833	net_stats->rx_bytes =
6834		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6835
6836	net_stats->tx_bytes =
6837		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6838
6839	net_stats->multicast =
6840		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6841
6842	net_stats->collisions =
6843		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6844
6845	net_stats->rx_length_errors =
6846		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6847		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6848
6849	net_stats->rx_over_errors =
6850		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6851		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6852
6853	net_stats->rx_frame_errors =
6854		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6855
6856	net_stats->rx_crc_errors =
6857		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6858
6859	net_stats->rx_errors = net_stats->rx_length_errors +
6860		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6861		net_stats->rx_crc_errors;
6862
6863	net_stats->tx_aborted_errors =
6864		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6865		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6866
6867	if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6868	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6869		net_stats->tx_carrier_errors = 0;
6870	else {
6871		net_stats->tx_carrier_errors =
6872			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6873	}
6874
6875	net_stats->tx_errors =
6876		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6877		net_stats->tx_aborted_errors +
6878		net_stats->tx_carrier_errors;
6879
6880	net_stats->rx_missed_errors =
6881		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6882		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6883		GET_32BIT_NET_STATS(stat_FwRxDrop);
6884
6885}
6886
6887/* All ethtool functions called with rtnl_lock */
6888
6889static int
6890bnx2_get_link_ksettings(struct net_device *dev,
6891			struct ethtool_link_ksettings *cmd)
6892{
6893	struct bnx2 *bp = netdev_priv(dev);
6894	int support_serdes = 0, support_copper = 0;
6895	u32 supported, advertising;
6896
6897	supported = SUPPORTED_Autoneg;
6898	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6899		support_serdes = 1;
6900		support_copper = 1;
6901	} else if (bp->phy_port == PORT_FIBRE)
6902		support_serdes = 1;
6903	else
6904		support_copper = 1;
6905
6906	if (support_serdes) {
6907		supported |= SUPPORTED_1000baseT_Full |
6908			SUPPORTED_FIBRE;
6909		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6910			supported |= SUPPORTED_2500baseX_Full;
6911	}
6912	if (support_copper) {
6913		supported |= SUPPORTED_10baseT_Half |
6914			SUPPORTED_10baseT_Full |
6915			SUPPORTED_100baseT_Half |
6916			SUPPORTED_100baseT_Full |
6917			SUPPORTED_1000baseT_Full |
6918			SUPPORTED_TP;
6919	}
6920
6921	spin_lock_bh(&bp->phy_lock);
6922	cmd->base.port = bp->phy_port;
6923	advertising = bp->advertising;
6924
6925	if (bp->autoneg & AUTONEG_SPEED) {
6926		cmd->base.autoneg = AUTONEG_ENABLE;
6927	} else {
6928		cmd->base.autoneg = AUTONEG_DISABLE;
6929	}
6930
6931	if (netif_carrier_ok(dev)) {
6932		cmd->base.speed = bp->line_speed;
6933		cmd->base.duplex = bp->duplex;
6934		if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6935			if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6936				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
6937			else
6938				cmd->base.eth_tp_mdix = ETH_TP_MDI;
6939		}
6940	}
6941	else {
6942		cmd->base.speed = SPEED_UNKNOWN;
6943		cmd->base.duplex = DUPLEX_UNKNOWN;
6944	}
6945	spin_unlock_bh(&bp->phy_lock);
6946
6947	cmd->base.phy_address = bp->phy_addr;
6948
6949	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
6950						supported);
6951	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
6952						advertising);
6953
6954	return 0;
6955}
6956
6957static int
6958bnx2_set_link_ksettings(struct net_device *dev,
6959			const struct ethtool_link_ksettings *cmd)
6960{
6961	struct bnx2 *bp = netdev_priv(dev);
6962	u8 autoneg = bp->autoneg;
6963	u8 req_duplex = bp->req_duplex;
6964	u16 req_line_speed = bp->req_line_speed;
6965	u32 advertising = bp->advertising;
6966	int err = -EINVAL;
6967
6968	spin_lock_bh(&bp->phy_lock);
6969
6970	if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE)
6971		goto err_out_unlock;
6972
6973	if (cmd->base.port != bp->phy_port &&
6974	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6975		goto err_out_unlock;
6976
6977	/* If device is down, we can store the settings only if the user
6978	 * is setting the currently active port.
6979	 */
6980	if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6981		goto err_out_unlock;
6982
6983	if (cmd->base.autoneg == AUTONEG_ENABLE) {
6984		autoneg |= AUTONEG_SPEED;
6985
6986		ethtool_convert_link_mode_to_legacy_u32(
6987			&advertising, cmd->link_modes.advertising);
6988
6989		if (cmd->base.port == PORT_TP) {
6990			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6991			if (!advertising)
6992				advertising = ETHTOOL_ALL_COPPER_SPEED;
6993		} else {
6994			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6995			if (!advertising)
6996				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6997		}
6998		advertising |= ADVERTISED_Autoneg;
6999	}
7000	else {
7001		u32 speed = cmd->base.speed;
7002
7003		if (cmd->base.port == PORT_FIBRE) {
7004			if ((speed != SPEED_1000 &&
7005			     speed != SPEED_2500) ||
7006			    (cmd->base.duplex != DUPLEX_FULL))
7007				goto err_out_unlock;
7008
7009			if (speed == SPEED_2500 &&
7010			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7011				goto err_out_unlock;
7012		} else if (speed == SPEED_1000 || speed == SPEED_2500)
7013			goto err_out_unlock;
7014
7015		autoneg &= ~AUTONEG_SPEED;
7016		req_line_speed = speed;
7017		req_duplex = cmd->base.duplex;
7018		advertising = 0;
7019	}
7020
7021	bp->autoneg = autoneg;
7022	bp->advertising = advertising;
7023	bp->req_line_speed = req_line_speed;
7024	bp->req_duplex = req_duplex;
7025
7026	err = 0;
7027	/* If device is down, the new settings will be picked up when it is
7028	 * brought up.
7029	 */
7030	if (netif_running(dev))
7031		err = bnx2_setup_phy(bp, cmd->base.port);
7032
7033err_out_unlock:
7034	spin_unlock_bh(&bp->phy_lock);
7035
7036	return err;
7037}
7038
7039static void
7040bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7041{
7042	struct bnx2 *bp = netdev_priv(dev);
7043
7044	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
7045	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7046	strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7047}
7048
7049#define BNX2_REGDUMP_LEN		(32 * 1024)
7050
7051static int
7052bnx2_get_regs_len(struct net_device *dev)
7053{
7054	return BNX2_REGDUMP_LEN;
7055}
7056
7057static void
7058bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7059{
7060	u32 *p = _p, i, offset;
7061	u8 *orig_p = _p;
7062	struct bnx2 *bp = netdev_priv(dev);
7063	static const u32 reg_boundaries[] = {
7064		0x0000, 0x0098, 0x0400, 0x045c,
7065		0x0800, 0x0880, 0x0c00, 0x0c10,
7066		0x0c30, 0x0d08, 0x1000, 0x101c,
7067		0x1040, 0x1048, 0x1080, 0x10a4,
7068		0x1400, 0x1490, 0x1498, 0x14f0,
7069		0x1500, 0x155c, 0x1580, 0x15dc,
7070		0x1600, 0x1658, 0x1680, 0x16d8,
7071		0x1800, 0x1820, 0x1840, 0x1854,
7072		0x1880, 0x1894, 0x1900, 0x1984,
7073		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
7074		0x1c80, 0x1c94, 0x1d00, 0x1d84,
7075		0x2000, 0x2030, 0x23c0, 0x2400,
7076		0x2800, 0x2820, 0x2830, 0x2850,
7077		0x2b40, 0x2c10, 0x2fc0, 0x3058,
7078		0x3c00, 0x3c94, 0x4000, 0x4010,
7079		0x4080, 0x4090, 0x43c0, 0x4458,
7080		0x4c00, 0x4c18, 0x4c40, 0x4c54,
7081		0x4fc0, 0x5010, 0x53c0, 0x5444,
7082		0x5c00, 0x5c18, 0x5c80, 0x5c90,
7083		0x5fc0, 0x6000, 0x6400, 0x6428,
7084		0x6800, 0x6848, 0x684c, 0x6860,
7085		0x6888, 0x6910, 0x8000
7086	};
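	/* reg_boundaries[] holds consecutive [start, end) pairs of register
	 * ranges that are safe to read; offsets falling in the gaps between
	 * pairs are left as the zeroes written by the memset() below.
	 */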
7087
7088	regs->version = 0;
7089
7090	memset(p, 0, BNX2_REGDUMP_LEN);
7091
7092	if (!netif_running(bp->dev))
7093		return;
7094
7095	i = 0;
7096	offset = reg_boundaries[0];
7097	p += offset;
7098	while (offset < BNX2_REGDUMP_LEN) {
7099		*p++ = BNX2_RD(bp, offset);
7100		offset += 4;
7101		if (offset == reg_boundaries[i + 1]) {
7102			offset = reg_boundaries[i + 2];
7103			p = (u32 *) (orig_p + offset);
7104			i += 2;
7105		}
7106	}
7107}
7108
7109static void
7110bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7111{
7112	struct bnx2 *bp = netdev_priv(dev);
7113
7114	if (bp->flags & BNX2_FLAG_NO_WOL) {
7115		wol->supported = 0;
7116		wol->wolopts = 0;
7117	}
7118	else {
7119		wol->supported = WAKE_MAGIC;
7120		if (bp->wol)
7121			wol->wolopts = WAKE_MAGIC;
7122		else
7123			wol->wolopts = 0;
7124	}
7125	memset(&wol->sopass, 0, sizeof(wol->sopass));
7126}
7127
7128static int
7129bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7130{
7131	struct bnx2 *bp = netdev_priv(dev);
7132
7133	if (wol->wolopts & ~WAKE_MAGIC)
7134		return -EINVAL;
7135
7136	if (wol->wolopts & WAKE_MAGIC) {
7137		if (bp->flags & BNX2_FLAG_NO_WOL)
7138			return -EINVAL;
7139
7140		bp->wol = 1;
7141	}
7142	else {
7143		bp->wol = 0;
7144	}
7145
7146	device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7147
7148	return 0;
7149}
7150
7151static int
7152bnx2_nway_reset(struct net_device *dev)
7153{
7154	struct bnx2 *bp = netdev_priv(dev);
7155	u32 bmcr;
7156
7157	if (!netif_running(dev))
7158		return -EAGAIN;
7159
7160	if (!(bp->autoneg & AUTONEG_SPEED)) {
7161		return -EINVAL;
7162	}
7163
7164	spin_lock_bh(&bp->phy_lock);
7165
7166	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7167		int rc;
7168
7169		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7170		spin_unlock_bh(&bp->phy_lock);
7171		return rc;
7172	}
7173
7174	/* Force a link down visible on the other side */
7175	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7176		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7177		spin_unlock_bh(&bp->phy_lock);
7178
7179		msleep(20);
7180
7181		spin_lock_bh(&bp->phy_lock);
7182
7183		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7184		bp->serdes_an_pending = 1;
7185		mod_timer(&bp->timer, jiffies + bp->current_interval);
7186	}
7187
7188	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7189	bmcr &= ~BMCR_LOOPBACK;
7190	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7191
7192	spin_unlock_bh(&bp->phy_lock);
7193
7194	return 0;
7195}
7196
7197static u32
7198bnx2_get_link(struct net_device *dev)
7199{
7200	struct bnx2 *bp = netdev_priv(dev);
7201
7202	return bp->link_up;
7203}
7204
7205static int
7206bnx2_get_eeprom_len(struct net_device *dev)
7207{
7208	struct bnx2 *bp = netdev_priv(dev);
7209
7210	if (!bp->flash_info)
7211		return 0;
7212
7213	return (int) bp->flash_size;
7214}
7215
7216static int
7217bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7218		u8 *eebuf)
7219{
7220	struct bnx2 *bp = netdev_priv(dev);
7221	int rc;
7222
7223	/* parameters already validated in ethtool_get_eeprom */
7224
7225	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7226
7227	return rc;
7228}
7229
7230static int
7231bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7232		u8 *eebuf)
7233{
7234	struct bnx2 *bp = netdev_priv(dev);
7235	int rc;
7236
7237	/* parameters already validated in ethtool_set_eeprom */
7238
7239	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7240
7241	return rc;
7242}
7243
7244static int
7245bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7246{
7247	struct bnx2 *bp = netdev_priv(dev);
7248
7249	memset(coal, 0, sizeof(struct ethtool_coalesce));
7250
7251	coal->rx_coalesce_usecs = bp->rx_ticks;
7252	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7253	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7254	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7255
7256	coal->tx_coalesce_usecs = bp->tx_ticks;
7257	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7258	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7259	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7260
7261	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7262
7263	return 0;
7264}
7265
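/* Clamp the requested coalescing values to the limits enforced below
 * (0x3ff for tick counts, 0xff for quick-consumer trip counts) and, if
 * the NIC is running, re-initialize it so the new values take effect.
 */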
7266static int
7267bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7268{
7269	struct bnx2 *bp = netdev_priv(dev);
7270
7271	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7272	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7273
7274	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7275	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7276
7277	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7278	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7279
7280	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7281	if (bp->rx_quick_cons_trip_int > 0xff)
7282		bp->rx_quick_cons_trip_int = 0xff;
7283
7284	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7285	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7286
7287	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7288	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7289
7290	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7291	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7292
7293	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7294	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7295		0xff;
7296
7297	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7298	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7299		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7300			bp->stats_ticks = USEC_PER_SEC;
7301	}
7302	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7303		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7304	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7305
7306	if (netif_running(bp->dev)) {
7307		bnx2_netif_stop(bp, true);
7308		bnx2_init_nic(bp, 0);
7309		bnx2_netif_start(bp, true);
7310	}
7311
7312	return 0;
7313}
7314
7315static void
7316bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7317{
7318	struct bnx2 *bp = netdev_priv(dev);
7319
7320	ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7321	ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7322
7323	ering->rx_pending = bp->rx_ring_size;
7324	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7325
7326	ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7327	ering->tx_pending = bp->tx_ring_size;
7328}
7329
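/* Tear the rings down and bring them back up with the new sizes.  When
 * reset_irq is set (ring-count changes from bnx2_set_channels()), the
 * IRQ vectors and NAPI contexts are released and re-allocated as well;
 * plain size changes keep the existing vectors.
 */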
7330static int
7331bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7332{
7333	if (netif_running(bp->dev)) {
7334		/* Reset will erase chipset stats; save them */
7335		bnx2_save_stats(bp);
7336
7337		bnx2_netif_stop(bp, true);
7338		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7339		if (reset_irq) {
7340			bnx2_free_irq(bp);
7341			bnx2_del_napi(bp);
7342		} else {
7343			__bnx2_free_irq(bp);
7344		}
7345		bnx2_free_skbs(bp);
7346		bnx2_free_mem(bp);
7347	}
7348
7349	bnx2_set_rx_ring_size(bp, rx);
7350	bp->tx_ring_size = tx;
7351
7352	if (netif_running(bp->dev)) {
7353		int rc = 0;
7354
7355		if (reset_irq) {
7356			rc = bnx2_setup_int_mode(bp, disable_msi);
7357			bnx2_init_napi(bp);
7358		}
7359
7360		if (!rc)
7361			rc = bnx2_alloc_mem(bp);
7362
7363		if (!rc)
7364			rc = bnx2_request_irq(bp);
7365
7366		if (!rc)
7367			rc = bnx2_init_nic(bp, 0);
7368
7369		if (rc) {
7370			bnx2_napi_enable(bp);
7371			dev_close(bp->dev);
7372			return rc;
7373		}
7374#ifdef BCM_CNIC
7375		mutex_lock(&bp->cnic_lock);
7376		/* Let cnic know about the new status block. */
7377		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7378			bnx2_setup_cnic_irq_info(bp);
7379		mutex_unlock(&bp->cnic_lock);
7380#endif
7381		bnx2_netif_start(bp, true);
7382	}
7383	return 0;
7384}
7385
7386static int
7387bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7388{
7389	struct bnx2 *bp = netdev_priv(dev);
7390	int rc;
7391
7392	if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7393		(ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7394		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7395
7396		return -EINVAL;
7397	}
7398	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7399				   false);
7400	return rc;
7401}
7402
7403static void
7404bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7405{
7406	struct bnx2 *bp = netdev_priv(dev);
7407
7408	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7409	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7410	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7411}
7412
7413static int
7414bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7415{
7416	struct bnx2 *bp = netdev_priv(dev);
7417
7418	bp->req_flow_ctrl = 0;
7419	if (epause->rx_pause)
7420		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7421	if (epause->tx_pause)
7422		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7423
7424	if (epause->autoneg) {
7425		bp->autoneg |= AUTONEG_FLOW_CTRL;
7426	}
7427	else {
7428		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7429	}
7430
7431	if (netif_running(dev)) {
7432		spin_lock_bh(&bp->phy_lock);
7433		bnx2_setup_phy(bp, bp->phy_port);
7434		spin_unlock_bh(&bp->phy_lock);
7435	}
7436
7437	return 0;
7438}
7439
7440static struct {
7441	char string[ETH_GSTRING_LEN];
7442} bnx2_stats_str_arr[] = {
7443	{ "rx_bytes" },
7444	{ "rx_error_bytes" },
7445	{ "tx_bytes" },
7446	{ "tx_error_bytes" },
7447	{ "rx_ucast_packets" },
7448	{ "rx_mcast_packets" },
7449	{ "rx_bcast_packets" },
7450	{ "tx_ucast_packets" },
7451	{ "tx_mcast_packets" },
7452	{ "tx_bcast_packets" },
7453	{ "tx_mac_errors" },
7454	{ "tx_carrier_errors" },
7455	{ "rx_crc_errors" },
7456	{ "rx_align_errors" },
7457	{ "tx_single_collisions" },
7458	{ "tx_multi_collisions" },
7459	{ "tx_deferred" },
7460	{ "tx_excess_collisions" },
7461	{ "tx_late_collisions" },
7462	{ "tx_total_collisions" },
7463	{ "rx_fragments" },
7464	{ "rx_jabbers" },
7465	{ "rx_undersize_packets" },
7466	{ "rx_oversize_packets" },
7467	{ "rx_64_byte_packets" },
7468	{ "rx_65_to_127_byte_packets" },
7469	{ "rx_128_to_255_byte_packets" },
7470	{ "rx_256_to_511_byte_packets" },
7471	{ "rx_512_to_1023_byte_packets" },
7472	{ "rx_1024_to_1522_byte_packets" },
7473	{ "rx_1523_to_9022_byte_packets" },
7474	{ "tx_64_byte_packets" },
7475	{ "tx_65_to_127_byte_packets" },
7476	{ "tx_128_to_255_byte_packets" },
7477	{ "tx_256_to_511_byte_packets" },
7478	{ "tx_512_to_1023_byte_packets" },
7479	{ "tx_1024_to_1522_byte_packets" },
7480	{ "tx_1523_to_9022_byte_packets" },
7481	{ "rx_xon_frames" },
7482	{ "rx_xoff_frames" },
7483	{ "tx_xon_frames" },
7484	{ "tx_xoff_frames" },
7485	{ "rx_mac_ctrl_frames" },
7486	{ "rx_filtered_packets" },
7487	{ "rx_ftq_discards" },
7488	{ "rx_discards" },
7489	{ "rx_fw_discards" },
7490};
7491
7492#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7493
7494#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7495
7496static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7497    STATS_OFFSET32(stat_IfHCInOctets_hi),
7498    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7499    STATS_OFFSET32(stat_IfHCOutOctets_hi),
7500    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7501    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7502    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7503    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7504    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7505    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7506    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7507    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7508    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7509    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7510    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7511    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7512    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7513    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7514    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7515    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7516    STATS_OFFSET32(stat_EtherStatsCollisions),
7517    STATS_OFFSET32(stat_EtherStatsFragments),
7518    STATS_OFFSET32(stat_EtherStatsJabbers),
7519    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7520    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7521    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7522    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7523    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7524    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7525    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7526    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7527    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7528    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7529    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7530    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7531    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7532    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7533    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7534    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7535    STATS_OFFSET32(stat_XonPauseFramesReceived),
7536    STATS_OFFSET32(stat_XoffPauseFramesReceived),
7537    STATS_OFFSET32(stat_OutXonSent),
7538    STATS_OFFSET32(stat_OutXoffSent),
7539    STATS_OFFSET32(stat_MacControlFramesReceived),
7540    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7541    STATS_OFFSET32(stat_IfInFTQDiscards),
7542    STATS_OFFSET32(stat_IfInMBUFDiscards),
7543    STATS_OFFSET32(stat_FwRxDrop),
7544};
7545
7546/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7547 * skipped because of errata.
7548 */
7549static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7550	8,0,8,8,8,8,8,8,8,8,
7551	4,0,4,4,4,4,4,4,4,4,
7552	4,4,4,4,4,4,4,4,4,4,
7553	4,4,4,4,4,4,4,4,4,4,
7554	4,4,4,4,4,4,4,
7555};
7556
7557static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7558	8,0,8,8,8,8,8,8,8,8,
7559	4,4,4,4,4,4,4,4,4,4,
7560	4,4,4,4,4,4,4,4,4,4,
7561	4,4,4,4,4,4,4,4,4,4,
7562	4,4,4,4,4,4,4,
7563};
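/* Each entry in the two arrays above gives the width in bytes of the
 * counter at the same index in bnx2_stats_offset_arr[]: 8 for a 64-bit
 * hi/lo pair, 4 for a 32-bit counter, 0 for a counter skipped on that
 * chip.
 */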
7564
7565#define BNX2_NUM_TESTS 6
7566
7567static struct {
7568	char string[ETH_GSTRING_LEN];
7569} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7570	{ "register_test (offline)" },
7571	{ "memory_test (offline)" },
7572	{ "loopback_test (offline)" },
7573	{ "nvram_test (online)" },
7574	{ "interrupt_test (online)" },
7575	{ "link_test (online)" },
7576};
7577
7578static int
7579bnx2_get_sset_count(struct net_device *dev, int sset)
7580{
7581	switch (sset) {
7582	case ETH_SS_TEST:
7583		return BNX2_NUM_TESTS;
7584	case ETH_SS_STATS:
7585		return BNX2_NUM_STATS;
7586	default:
7587		return -EOPNOTSUPP;
7588	}
7589}
7590
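/* ethtool -t handler.  The three offline tests require the chip to be
 * quiesced and reset into diagnostic mode first; the nvram, interrupt
 * and link tests run against the live configuration.  buf[] holds one
 * u64 result per entry in bnx2_tests_str_arr[].
 */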
7591static void
7592bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7593{
7594	struct bnx2 *bp = netdev_priv(dev);
7595
7596	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7597	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7598		int i;
7599
7600		bnx2_netif_stop(bp, true);
7601		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7602		bnx2_free_skbs(bp);
7603
7604		if (bnx2_test_registers(bp) != 0) {
7605			buf[0] = 1;
7606			etest->flags |= ETH_TEST_FL_FAILED;
7607		}
7608		if (bnx2_test_memory(bp) != 0) {
7609			buf[1] = 1;
7610			etest->flags |= ETH_TEST_FL_FAILED;
7611		}
7612		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7613			etest->flags |= ETH_TEST_FL_FAILED;
7614
7615		if (!netif_running(bp->dev))
7616			bnx2_shutdown_chip(bp);
7617		else {
7618			bnx2_init_nic(bp, 1);
7619			bnx2_netif_start(bp, true);
7620		}
7621
7622		/* wait for link up */
7623		for (i = 0; i < 7; i++) {
7624			if (bp->link_up)
7625				break;
7626			msleep_interruptible(1000);
7627		}
7628	}
7629
7630	if (bnx2_test_nvram(bp) != 0) {
7631		buf[3] = 1;
7632		etest->flags |= ETH_TEST_FL_FAILED;
7633	}
7634	if (bnx2_test_intr(bp) != 0) {
7635		buf[4] = 1;
7636		etest->flags |= ETH_TEST_FL_FAILED;
7637	}
7638
7639	if (bnx2_test_link(bp) != 0) {
7640		buf[5] = 1;
7641		etest->flags |= ETH_TEST_FL_FAILED;
7642
7643	}
7644}
7645
7646static void
7647bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7648{
7649	switch (stringset) {
7650	case ETH_SS_STATS:
7651		memcpy(buf, bnx2_stats_str_arr,
7652			sizeof(bnx2_stats_str_arr));
7653		break;
7654	case ETH_SS_TEST:
7655		memcpy(buf, bnx2_tests_str_arr,
7656			sizeof(bnx2_tests_str_arr));
7657		break;
7658	}
7659}
7660
7661static void
7662bnx2_get_ethtool_stats(struct net_device *dev,
7663		struct ethtool_stats *stats, u64 *buf)
7664{
7665	struct bnx2 *bp = netdev_priv(dev);
7666	int i;
7667	u32 *hw_stats = (u32 *) bp->stats_blk;
7668	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7669	u8 *stats_len_arr = NULL;
7670
7671	if (!hw_stats) {
7672		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7673		return;
7674	}
7675
7676	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7677	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7678	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7679	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7680		stats_len_arr = bnx2_5706_stats_len_arr;
7681	else
7682		stats_len_arr = bnx2_5708_stats_len_arr;
7683
7684	for (i = 0; i < BNX2_NUM_STATS; i++) {
7685		unsigned long offset;
7686
7687		if (stats_len_arr[i] == 0) {
7688			/* skip this counter */
7689			buf[i] = 0;
7690			continue;
7691		}
7692
7693		offset = bnx2_stats_offset_arr[i];
7694		if (stats_len_arr[i] == 4) {
7695			/* 4-byte counter */
7696			buf[i] = (u64) *(hw_stats + offset) +
7697				 *(temp_stats + offset);
7698			continue;
7699		}
7700		/* 8-byte counter */
7701		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7702			 *(hw_stats + offset + 1) +
7703			 (((u64) *(temp_stats + offset)) << 32) +
7704			 *(temp_stats + offset + 1);
7705	}
7706}
7707
7708static int
7709bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7710{
7711	struct bnx2 *bp = netdev_priv(dev);
7712
7713	switch (state) {
7714	case ETHTOOL_ID_ACTIVE:
7715		bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7716		BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7717		return 1;	/* cycle on/off once per second */
7718
7719	case ETHTOOL_ID_ON:
7720		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7721			BNX2_EMAC_LED_1000MB_OVERRIDE |
7722			BNX2_EMAC_LED_100MB_OVERRIDE |
7723			BNX2_EMAC_LED_10MB_OVERRIDE |
7724			BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7725			BNX2_EMAC_LED_TRAFFIC);
7726		break;
7727
7728	case ETHTOOL_ID_OFF:
7729		BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7730		break;
7731
7732	case ETHTOOL_ID_INACTIVE:
7733		BNX2_WR(bp, BNX2_EMAC_LED, 0);
7734		BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7735		break;
7736	}
7737
7738	return 0;
7739}
7740
7741static int
7742bnx2_set_features(struct net_device *dev, netdev_features_t features)
7743{
7744	struct bnx2 *bp = netdev_priv(dev);
7745
7746	/* TSO with VLAN tag won't work with current firmware */
7747	if (features & NETIF_F_HW_VLAN_CTAG_TX)
7748		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
7749	else
7750		dev->vlan_features &= ~NETIF_F_ALL_TSO;
7751
7752	if ((!!(features & NETIF_F_HW_VLAN_CTAG_RX) !=
7753	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7754	    netif_running(dev)) {
7755		bnx2_netif_stop(bp, false);
7756		dev->features = features;
7757		bnx2_set_rx_mode(dev);
7758		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7759		bnx2_netif_start(bp, false);
7760		return 1;
7761	}
7762
7763	return 0;
7764}
7765
7766static void bnx2_get_channels(struct net_device *dev,
7767			      struct ethtool_channels *channels)
7768{
7769	struct bnx2 *bp = netdev_priv(dev);
7770	u32 max_rx_rings = 1;
7771	u32 max_tx_rings = 1;
7772
7773	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7774		max_rx_rings = RX_MAX_RINGS;
7775		max_tx_rings = TX_MAX_RINGS;
7776	}
7777
7778	channels->max_rx = max_rx_rings;
7779	channels->max_tx = max_tx_rings;
7780	channels->max_other = 0;
7781	channels->max_combined = 0;
7782	channels->rx_count = bp->num_rx_rings;
7783	channels->tx_count = bp->num_tx_rings;
7784	channels->other_count = 0;
7785	channels->combined_count = 0;
7786}
7787
7788static int bnx2_set_channels(struct net_device *dev,
7789			      struct ethtool_channels *channels)
7790{
7791	struct bnx2 *bp = netdev_priv(dev);
7792	u32 max_rx_rings = 1;
7793	u32 max_tx_rings = 1;
7794	int rc = 0;
7795
7796	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7797		max_rx_rings = RX_MAX_RINGS;
7798		max_tx_rings = TX_MAX_RINGS;
7799	}
7800	if (channels->rx_count > max_rx_rings ||
7801	    channels->tx_count > max_tx_rings)
7802		return -EINVAL;
7803
7804	bp->num_req_rx_rings = channels->rx_count;
7805	bp->num_req_tx_rings = channels->tx_count;
7806
7807	if (netif_running(dev))
7808		rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7809					   bp->tx_ring_size, true);
7810
7811	return rc;
7812}
7813
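/* supported_coalesce_params lets the ethtool core reject requests that
 * touch coalescing fields this driver does not implement before
 * ->set_coalesce is ever called.
 */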
7814static const struct ethtool_ops bnx2_ethtool_ops = {
7815	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
7816				     ETHTOOL_COALESCE_MAX_FRAMES |
7817				     ETHTOOL_COALESCE_USECS_IRQ |
7818				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
7819				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
7820	.get_drvinfo		= bnx2_get_drvinfo,
7821	.get_regs_len		= bnx2_get_regs_len,
7822	.get_regs		= bnx2_get_regs,
7823	.get_wol		= bnx2_get_wol,
7824	.set_wol		= bnx2_set_wol,
7825	.nway_reset		= bnx2_nway_reset,
7826	.get_link		= bnx2_get_link,
7827	.get_eeprom_len		= bnx2_get_eeprom_len,
7828	.get_eeprom		= bnx2_get_eeprom,
7829	.set_eeprom		= bnx2_set_eeprom,
7830	.get_coalesce		= bnx2_get_coalesce,
7831	.set_coalesce		= bnx2_set_coalesce,
7832	.get_ringparam		= bnx2_get_ringparam,
7833	.set_ringparam		= bnx2_set_ringparam,
7834	.get_pauseparam		= bnx2_get_pauseparam,
7835	.set_pauseparam		= bnx2_set_pauseparam,
7836	.self_test		= bnx2_self_test,
7837	.get_strings		= bnx2_get_strings,
7838	.set_phys_id		= bnx2_set_phys_id,
7839	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7840	.get_sset_count		= bnx2_get_sset_count,
7841	.get_channels		= bnx2_get_channels,
7842	.set_channels		= bnx2_set_channels,
7843	.get_link_ksettings	= bnx2_get_link_ksettings,
7844	.set_link_ksettings	= bnx2_set_link_ksettings,
7845};
7846
7847/* Called with rtnl_lock */
7848static int
7849bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7850{
7851	struct mii_ioctl_data *data = if_mii(ifr);
7852	struct bnx2 *bp = netdev_priv(dev);
7853	int err;
7854
7855	switch(cmd) {
7856	case SIOCGMIIPHY:
7857		data->phy_id = bp->phy_addr;
7858
7859		fallthrough;
7860	case SIOCGMIIREG: {
7861		u32 mii_regval;
7862
7863		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7864			return -EOPNOTSUPP;
7865
7866		if (!netif_running(dev))
7867			return -EAGAIN;
7868
7869		spin_lock_bh(&bp->phy_lock);
7870		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7871		spin_unlock_bh(&bp->phy_lock);
7872
7873		data->val_out = mii_regval;
7874
7875		return err;
7876	}
7877
7878	case SIOCSMIIREG:
7879		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7880			return -EOPNOTSUPP;
7881
7882		if (!netif_running(dev))
7883			return -EAGAIN;
7884
7885		spin_lock_bh(&bp->phy_lock);
7886		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7887		spin_unlock_bh(&bp->phy_lock);
7888
7889		return err;
7890
7891	default:
7892		/* do nothing */
7893		break;
7894	}
7895	return -EOPNOTSUPP;
7896}
7897
7898/* Called with rtnl_lock */
7899static int
7900bnx2_change_mac_addr(struct net_device *dev, void *p)
7901{
7902	struct sockaddr *addr = p;
7903	struct bnx2 *bp = netdev_priv(dev);
7904
7905	if (!is_valid_ether_addr(addr->sa_data))
7906		return -EADDRNOTAVAIL;
7907
7908	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7909	if (netif_running(dev))
7910		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7911
7912	return 0;
7913}
7914
7915/* Called with rtnl_lock */
7916static int
7917bnx2_change_mtu(struct net_device *dev, int new_mtu)
7918{
7919	struct bnx2 *bp = netdev_priv(dev);
7920
7921	dev->mtu = new_mtu;
7922	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7923				     false);
7924}
7925
7926#ifdef CONFIG_NET_POLL_CONTROLLER
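/* netpoll entry point: interrupts may be unavailable, so invoke each
 * vector's handler directly, bracketed by disable_irq()/enable_irq()
 * to avoid racing a real interrupt.
 */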
7927static void
7928poll_bnx2(struct net_device *dev)
7929{
7930	struct bnx2 *bp = netdev_priv(dev);
7931	int i;
7932
7933	for (i = 0; i < bp->irq_nvecs; i++) {
7934		struct bnx2_irq *irq = &bp->irq_tbl[i];
7935
7936		disable_irq(irq->vector);
7937		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7938		enable_irq(irq->vector);
7939	}
7940}
7941#endif
7942
7943static void
7944bnx2_get_5709_media(struct bnx2 *bp)
7945{
7946	u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7947	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7948	u32 strap;
7949
7950	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7951		return;
7952	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7953		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7954		return;
7955	}
7956
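	/* No explicit bond ID, so fall back to the media strap.  Which
	 * strap values select SerDes differs between PCI function 0 and
	 * function 1.
	 */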
7957	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7958		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7959	else
7960		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7961
7962	if (bp->func == 0) {
7963		switch (strap) {
7964		case 0x4:
7965		case 0x5:
7966		case 0x6:
7967			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7968			return;
7969		}
7970	} else {
7971		switch (strap) {
7972		case 0x1:
7973		case 0x2:
7974		case 0x4:
7975			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7976			return;
7977		}
7978	}
7979}
7980
7981static void
7982bnx2_get_pci_speed(struct bnx2 *bp)
7983{
7984	u32 reg;
7985
7986	reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7987	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7988		u32 clkreg;
7989
7990		bp->flags |= BNX2_FLAG_PCIX;
7991
7992		clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7993
7994		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7995		switch (clkreg) {
7996		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7997			bp->bus_speed_mhz = 133;
7998			break;
7999
8000		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
8001			bp->bus_speed_mhz = 100;
8002			break;
8003
8004		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
8005		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
8006			bp->bus_speed_mhz = 66;
8007			break;
8008
8009		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
8010		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
8011			bp->bus_speed_mhz = 50;
8012			break;
8013
8014		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
8015		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
8016		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
8017			bp->bus_speed_mhz = 33;
8018			break;
8019		}
8020	}
8021	else {
8022		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
8023			bp->bus_speed_mhz = 66;
8024		else
8025			bp->bus_speed_mhz = 33;
8026	}
8027
8028	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
8029		bp->flags |= BNX2_FLAG_PCI_32BIT;
8030
8031}
8032
8033static void
8034bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8035{
8036	int rc, i, j;
8037	u8 *data;
8038	unsigned int block_end, rosize, len;
8039
8040#define BNX2_VPD_NVRAM_OFFSET	0x300
8041#define BNX2_VPD_LEN		128
8042#define BNX2_MAX_VER_SLEN	30
8043
8044	data = kmalloc(256, GFP_KERNEL);
8045	if (!data)
8046		return;
8047
8048	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
8049			     BNX2_VPD_LEN);
8050	if (rc)
8051		goto vpd_done;
8052
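	/* bnx2_nvram_read() filled the upper half of the buffer; copy it
	 * into the lower half while byte-reversing each 32-bit word so the
	 * VPD image ends up in byte order.
	 */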
8053	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
8054		data[i] = data[i + BNX2_VPD_LEN + 3];
8055		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
8056		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
8057		data[i + 3] = data[i + BNX2_VPD_LEN];
8058	}
8059
8060	i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
8061	if (i < 0)
8062		goto vpd_done;
8063
8064	rosize = pci_vpd_lrdt_size(&data[i]);
8065	i += PCI_VPD_LRDT_TAG_SIZE;
8066	block_end = i + rosize;
8067
8068	if (block_end > BNX2_VPD_LEN)
8069		goto vpd_done;
8070
8071	j = pci_vpd_find_info_keyword(data, i, rosize,
8072				      PCI_VPD_RO_KEYWORD_MFR_ID);
8073	if (j < 0)
8074		goto vpd_done;
8075
8076	len = pci_vpd_info_field_size(&data[j]);
8077
8078	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8079	if (j + len > block_end || len != 4 ||
8080	    memcmp(&data[j], "1028", 4))
8081		goto vpd_done;
8082
8083	j = pci_vpd_find_info_keyword(data, i, rosize,
8084				      PCI_VPD_RO_KEYWORD_VENDOR0);
8085	if (j < 0)
8086		goto vpd_done;
8087
8088	len = pci_vpd_info_field_size(&data[j]);
8089
8090	j += PCI_VPD_INFO_FLD_HDR_SIZE;
8091	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
8092		goto vpd_done;
8093
8094	memcpy(bp->fw_version, &data[j], len);
8095	bp->fw_version[len] = ' ';
8096
8097vpd_done:
8098	kfree(data);
8099}
8100
8101static int
8102bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8103{
8104	struct bnx2 *bp;
8105	int rc, i, j;
8106	u32 reg;
8107	u64 dma_mask, persist_dma_mask;
8108	int err;
8109
8110	SET_NETDEV_DEV(dev, &pdev->dev);
8111	bp = netdev_priv(dev);
8112
8113	bp->flags = 0;
8114	bp->phy_flags = 0;
8115
8116	bp->temp_stats_blk =
8117		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
8118
8119	if (!bp->temp_stats_blk) {
8120		rc = -ENOMEM;
8121		goto err_out;
8122	}
8123
8124	/* enable device (incl. PCI PM wakeup), and bus-mastering */
8125	rc = pci_enable_device(pdev);
8126	if (rc) {
8127		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
8128		goto err_out;
8129	}
8130
8131	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8132		dev_err(&pdev->dev,
8133			"Cannot find PCI device base address, aborting\n");
8134		rc = -ENODEV;
8135		goto err_out_disable;
8136	}
8137
8138	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8139	if (rc) {
8140		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
8141		goto err_out_disable;
8142	}
8143
8144	pci_set_master(pdev);
8145
8146	bp->pm_cap = pdev->pm_cap;
8147	if (bp->pm_cap == 0) {
8148		dev_err(&pdev->dev,
8149			"Cannot find power management capability, aborting\n");
8150		rc = -EIO;
8151		goto err_out_release;
8152	}
8153
8154	bp->dev = dev;
8155	bp->pdev = pdev;
8156
8157	spin_lock_init(&bp->phy_lock);
8158	spin_lock_init(&bp->indirect_lock);
8159#ifdef BCM_CNIC
8160	mutex_init(&bp->cnic_lock);
8161#endif
8162	INIT_WORK(&bp->reset_task, bnx2_reset_task);
8163
8164	bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8165							 TX_MAX_TSS_RINGS + 1));
8166	if (!bp->regview) {
8167		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8168		rc = -ENOMEM;
8169		goto err_out_release;
8170	}
8171
8172	/* Configure byte swap and enable write to the reg_window registers.
8173	 * Rely on the CPU to do target byte swapping on big-endian systems;
8174	 * the chip's target access swapping will not swap all accesses.
8175	 */
8176	BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8177		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8178		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8179
8180	bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8181
8182	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8183		if (!pci_is_pcie(pdev)) {
8184			dev_err(&pdev->dev, "Not PCIE, aborting\n");
8185			rc = -EIO;
8186			goto err_out_unmap;
8187		}
8188		bp->flags |= BNX2_FLAG_PCIE;
8189		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8190			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8191
8192		/* AER (Advanced Error Reporting) hooks */
8193		err = pci_enable_pcie_error_reporting(pdev);
8194		if (!err)
8195			bp->flags |= BNX2_FLAG_AER_ENABLED;
8196
8197	} else {
8198		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8199		if (bp->pcix_cap == 0) {
8200			dev_err(&pdev->dev,
8201				"Cannot find PCIX capability, aborting\n");
8202			rc = -EIO;
8203			goto err_out_unmap;
8204		}
8205		bp->flags |= BNX2_FLAG_BROKEN_STATS;
8206	}
8207
8208	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8209	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8210		if (pdev->msix_cap)
8211			bp->flags |= BNX2_FLAG_MSIX_CAP;
8212	}
8213
8214	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8215	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8216		if (pdev->msi_cap)
8217			bp->flags |= BNX2_FLAG_MSI_CAP;
8218	}
8219
8220	/* 5708 cannot support DMA addresses > 40-bit.  */
8221	if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8222		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8223	else
8224		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8225
8226	/* Configure DMA attributes. */
8227	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8228		dev->features |= NETIF_F_HIGHDMA;
8229		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8230		if (rc) {
8231			dev_err(&pdev->dev,
8232				"pci_set_consistent_dma_mask failed, aborting\n");
8233			goto err_out_unmap;
8234		}
8235	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8236		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8237		goto err_out_unmap;
8238	}
8239
8240	if (!(bp->flags & BNX2_FLAG_PCIE))
8241		bnx2_get_pci_speed(bp);
8242
8243	/* 5706A0 may falsely detect SERR and PERR. */
8244	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8245		reg = BNX2_RD(bp, PCI_COMMAND);
8246		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8247		BNX2_WR(bp, PCI_COMMAND, reg);
8248	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8249		!(bp->flags & BNX2_FLAG_PCIX)) {
8250		dev_err(&pdev->dev,
8251			"5706 A1 can only be used in a PCIX bus, aborting\n");
8252		rc = -EPERM;
8253		goto err_out_unmap;
8254	}
8255
8256	bnx2_init_nvram(bp);
8257
8258	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8259
8260	if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8261		bp->func = 1;
8262
8263	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8264	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8265		u32 off = bp->func << 2;
8266
8267		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8268	} else
8269		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8270
8271	/* Get the permanent MAC address.  First we need to make sure the
8272	 * firmware is actually running.
8273	 */
8274	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8275
8276	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8277	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8278		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8279		rc = -ENODEV;
8280		goto err_out_unmap;
8281	}
8282
8283	bnx2_read_vpd_fw_ver(bp);
8284
8285	j = strlen(bp->fw_version);
8286	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
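	/* BNX2_DEV_INFO_BC_REV packs the bootcode revision one byte per
	 * field in the upper three bytes; format it as "bc x.y.z" while
	 * suppressing leading zeros in each field.
	 */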
8287	for (i = 0; i < 3 && j < 24; i++) {
8288		u8 num, k, skip0;
8289
8290		if (i == 0) {
8291			bp->fw_version[j++] = 'b';
8292			bp->fw_version[j++] = 'c';
8293			bp->fw_version[j++] = ' ';
8294		}
8295		num = (u8) (reg >> (24 - (i * 8)));
8296		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8297			if (num >= k || !skip0 || k == 1) {
8298				bp->fw_version[j++] = (num / k) + '0';
8299				skip0 = 0;
8300			}
8301		}
8302		if (i != 2)
8303			bp->fw_version[j++] = '.';
8304	}
8305	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8306	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8307		bp->wol = 1;
8308
8309	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8310		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8311
8312		for (i = 0; i < 30; i++) {
8313			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8314			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8315				break;
8316			msleep(10);
8317		}
8318	}
8319	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8320	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8321	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8322	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8323		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8324
8325		if (j < 32)
8326			bp->fw_version[j++] = ' ';
8327		for (i = 0; i < 3 && j < 28; i++) {
8328			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8329			reg = be32_to_cpu(reg);
8330			memcpy(&bp->fw_version[j], &reg, 4);
8331			j += 4;
8332		}
8333	}
8334
8335	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8336	bp->mac_addr[0] = (u8) (reg >> 8);
8337	bp->mac_addr[1] = (u8) reg;
8338
8339	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8340	bp->mac_addr[2] = (u8) (reg >> 24);
8341	bp->mac_addr[3] = (u8) (reg >> 16);
8342	bp->mac_addr[4] = (u8) (reg >> 8);
8343	bp->mac_addr[5] = (u8) reg;
8344
8345	bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8346	bnx2_set_rx_ring_size(bp, 255);
8347
8348	bp->tx_quick_cons_trip_int = 2;
8349	bp->tx_quick_cons_trip = 20;
8350	bp->tx_ticks_int = 18;
8351	bp->tx_ticks = 80;
8352
8353	bp->rx_quick_cons_trip_int = 2;
8354	bp->rx_quick_cons_trip = 12;
8355	bp->rx_ticks_int = 18;
8356	bp->rx_ticks = 18;
8357
8358	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8359
8360	bp->current_interval = BNX2_TIMER_INTERVAL;
8361
8362	bp->phy_addr = 1;
8363
8364	/* allocate stats_blk */
8365	rc = bnx2_alloc_stats_blk(dev);
8366	if (rc)
8367		goto err_out_unmap;
8368
8369	/* Disable WOL support if we are running on a SERDES chip. */
8370	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8371		bnx2_get_5709_media(bp);
8372	else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8373		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8374
8375	bp->phy_port = PORT_TP;
8376	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8377		bp->phy_port = PORT_FIBRE;
8378		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8379		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8380			bp->flags |= BNX2_FLAG_NO_WOL;
8381			bp->wol = 0;
8382		}
8383		if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8384			/* Don't do parallel detect on this board because of
8385			 * some board problems.  The link will not go down
8386			 * if we do parallel detect.
8387			 */
8388			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8389			    pdev->subsystem_device == 0x310c)
8390				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8391		} else {
8392			bp->phy_addr = 2;
8393			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8394				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8395		}
8396	} else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8397		   BNX2_CHIP(bp) == BNX2_CHIP_5708)
8398		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8399	else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8400		 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8401		  BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8402		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8403
8404	bnx2_init_fw_cap(bp);
8405
8406	if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8407	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8408	    (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8409	    !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8410		bp->flags |= BNX2_FLAG_NO_WOL;
8411		bp->wol = 0;
8412	}
8413
8414	if (bp->flags & BNX2_FLAG_NO_WOL)
8415		device_set_wakeup_capable(&bp->pdev->dev, false);
8416	else
8417		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8418
8419	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8420		bp->tx_quick_cons_trip_int =
8421			bp->tx_quick_cons_trip;
8422		bp->tx_ticks_int = bp->tx_ticks;
8423		bp->rx_quick_cons_trip_int =
8424			bp->rx_quick_cons_trip;
8425		bp->rx_ticks_int = bp->rx_ticks;
8426		bp->comp_prod_trip_int = bp->comp_prod_trip;
8427		bp->com_ticks_int = bp->com_ticks;
8428		bp->cmd_ticks_int = bp->cmd_ticks;
8429	}
8430
8431	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8432	 *
8433	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8434	 * with byte enables disabled on the unused 32-bit word.  This is legal
8435	 * but causes problems on the AMD 8132 which will eventually stop
8436	 * responding after a while.
8437	 *
8438	 * AMD believes this incompatibility is unique to the 5706, and
8439	 * prefers to locally disable MSI rather than globally disabling it.
8440	 */
8441	if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8442		struct pci_dev *amd_8132 = NULL;
8443
8444		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8445						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8446						  amd_8132))) {
8447
8448			if (amd_8132->revision >= 0x10 &&
8449			    amd_8132->revision <= 0x13) {
8450				disable_msi = 1;
8451				pci_dev_put(amd_8132);
8452				break;
8453			}
8454		}
8455	}
8456
8457	bnx2_set_default_link(bp);
8458	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8459
8460	timer_setup(&bp->timer, bnx2_timer, 0);
8461	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8462
8463#ifdef BCM_CNIC
8464	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8465		bp->cnic_eth_dev.max_iscsi_conn =
8466			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8467			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8468	bp->cnic_probe = bnx2_cnic_probe;
8469#endif
8470	pci_save_state(pdev);
8471
8472	return 0;
8473
8474err_out_unmap:
8475	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8476		pci_disable_pcie_error_reporting(pdev);
8477		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8478	}
8479
8480	pci_iounmap(pdev, bp->regview);
8481	bp->regview = NULL;
8482
8483err_out_release:
8484	pci_release_regions(pdev);
8485
8486err_out_disable:
8487	pci_disable_device(pdev);
8488
8489err_out:
8490	kfree(bp->temp_stats_blk);
8491
8492	return rc;
8493}
8494
8495static char *
8496bnx2_bus_string(struct bnx2 *bp, char *str)
8497{
8498	char *s = str;
8499
8500	if (bp->flags & BNX2_FLAG_PCIE) {
8501		s += sprintf(s, "PCI Express");
8502	} else {
8503		s += sprintf(s, "PCI");
8504		if (bp->flags & BNX2_FLAG_PCIX)
8505			s += sprintf(s, "-X");
8506		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8507			s += sprintf(s, " 32-bit");
8508		else
8509			s += sprintf(s, " 64-bit");
8510		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8511	}
8512	return str;
8513}
8514
8515static void
8516bnx2_del_napi(struct bnx2 *bp)
8517{
8518	int i;
8519
8520	for (i = 0; i < bp->irq_nvecs; i++)
8521		netif_napi_del(&bp->bnx2_napi[i].napi);
8522}
8523
8524static void
8525bnx2_init_napi(struct bnx2 *bp)
8526{
8527	int i;
8528
8529	for (i = 0; i < bp->irq_nvecs; i++) {
8530		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8531		int (*poll)(struct napi_struct *, int);
8532
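		/* Vector 0 services the default status block (link and
		 * other events) and so uses the full bnx2_poll(); the
		 * remaining MSI-X vectors only run their own rx/tx rings
		 * via bnx2_poll_msix().
		 */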
8533		if (i == 0)
8534			poll = bnx2_poll;
8535		else
8536			poll = bnx2_poll_msix;
8537
8538		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8539		bnapi->bp = bp;
8540	}
8541}
8542
8543static const struct net_device_ops bnx2_netdev_ops = {
8544	.ndo_open		= bnx2_open,
8545	.ndo_start_xmit		= bnx2_start_xmit,
8546	.ndo_stop		= bnx2_close,
8547	.ndo_get_stats64	= bnx2_get_stats64,
8548	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8549	.ndo_do_ioctl		= bnx2_ioctl,
8550	.ndo_validate_addr	= eth_validate_addr,
8551	.ndo_set_mac_address	= bnx2_change_mac_addr,
8552	.ndo_change_mtu		= bnx2_change_mtu,
8553	.ndo_set_features	= bnx2_set_features,
8554	.ndo_tx_timeout		= bnx2_tx_timeout,
8555#ifdef CONFIG_NET_POLL_CONTROLLER
8556	.ndo_poll_controller	= poll_bnx2,
8557#endif
8558};
8559
8560static int
8561bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8562{
8563	struct net_device *dev;
8564	struct bnx2 *bp;
8565	int rc;
8566	char str[40];
8567
8568	/* dev zeroed in alloc_etherdev_mq */
8569	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8570	if (!dev)
8571		return -ENOMEM;
8572
8573	rc = bnx2_init_board(pdev, dev);
8574	if (rc < 0)
8575		goto err_free;
8576
8577	dev->netdev_ops = &bnx2_netdev_ops;
8578	dev->watchdog_timeo = TX_TIMEOUT;
8579	dev->ethtool_ops = &bnx2_ethtool_ops;
8580
8581	bp = netdev_priv(dev);
8582
8583	pci_set_drvdata(pdev, dev);
8584
8585	/*
8586	 * In-flight DMA from the first kernel can continue in the kdump kernel.
8587	 * A new io-page table has been created before bnx2 does its reset at
8588	 * open time, so wait for that in-flight DMA to complete to keep it
8589	 * from looking up entries in the newly created io-page table.
8590	 */
8591	if (is_kdump_kernel())
8592		bnx2_wait_dma_complete(bp);
8593
8594	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8595
8596	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
8597		NETIF_F_TSO | NETIF_F_TSO_ECN |
8598		NETIF_F_RXHASH | NETIF_F_RXCSUM;
8599
8600	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8601		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8602
8603	dev->vlan_features = dev->hw_features;
8604	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
8605	dev->features |= dev->hw_features;
8606	dev->priv_flags |= IFF_UNICAST_FLT;
8607	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
8608	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
8609
8610	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8611		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
8612
8613	if ((rc = register_netdev(dev))) {
8614		dev_err(&pdev->dev, "Cannot register net device\n");
8615		goto error;
8616	}
8617
8618	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8619		    "node addr %pM\n", board_info[ent->driver_data].name,
8620		    ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8621		    ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8622		    bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8623		    pdev->irq, dev->dev_addr);
8624
8625	return 0;
8626
8627error:
8628	pci_iounmap(pdev, bp->regview);
8629	pci_release_regions(pdev);
8630	pci_disable_device(pdev);
8631err_free:
8632	bnx2_free_stats_blk(dev);
8633	free_netdev(dev);
8634	return rc;
8635}
8636
8637static void
8638bnx2_remove_one(struct pci_dev *pdev)
8639{
8640	struct net_device *dev = pci_get_drvdata(pdev);
8641	struct bnx2 *bp = netdev_priv(dev);
8642
8643	unregister_netdev(dev);
8644
8645	del_timer_sync(&bp->timer);
8646	cancel_work_sync(&bp->reset_task);
8647
8648	pci_iounmap(bp->pdev, bp->regview);
8649
8650	bnx2_free_stats_blk(dev);
8651	kfree(bp->temp_stats_blk);
8652
8653	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
8654		pci_disable_pcie_error_reporting(pdev);
8655		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8656	}
8657
8658	bnx2_release_firmware(bp);
8659
8660	free_netdev(dev);
8661
8662	pci_release_regions(pdev);
8663	pci_disable_device(pdev);
8664}
8665
8666#ifdef CONFIG_PM_SLEEP
8667static int
8668bnx2_suspend(struct device *device)
8669{
8670	struct net_device *dev = dev_get_drvdata(device);
8671	struct bnx2 *bp = netdev_priv(dev);
8672
8673	if (netif_running(dev)) {
8674		cancel_work_sync(&bp->reset_task);
8675		bnx2_netif_stop(bp, true);
8676		netif_device_detach(dev);
8677		del_timer_sync(&bp->timer);
8678		bnx2_shutdown_chip(bp);
8679		__bnx2_free_irq(bp);
8680		bnx2_free_skbs(bp);
8681	}
8682	bnx2_setup_wol(bp);
8683	return 0;
8684}
8685
8686static int
8687bnx2_resume(struct device *device)
8688{
8689	struct net_device *dev = dev_get_drvdata(device);
8690	struct bnx2 *bp = netdev_priv(dev);
8691
8692	if (!netif_running(dev))
8693		return 0;
8694
8695	bnx2_set_power_state(bp, PCI_D0);
8696	netif_device_attach(dev);
8697	bnx2_request_irq(bp);
8698	bnx2_init_nic(bp, 1);
8699	bnx2_netif_start(bp, true);
8700	return 0;
8701}
8702
8703static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
8704#define BNX2_PM_OPS (&bnx2_pm_ops)
8705
8706#else
8707
8708#define BNX2_PM_OPS NULL
8709
8710#endif /* CONFIG_PM_SLEEP */
8711/**
8712 * bnx2_io_error_detected - called when PCI error is detected
8713 * @pdev: Pointer to PCI device
8714 * @state: The current pci connection state
8715 *
8716 * This function is called after a PCI bus error affecting
8717 * this device has been detected.
8718 */
8719static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8720					       pci_channel_state_t state)
8721{
8722	struct net_device *dev = pci_get_drvdata(pdev);
8723	struct bnx2 *bp = netdev_priv(dev);
8724
8725	rtnl_lock();
8726	netif_device_detach(dev);
8727
8728	if (state == pci_channel_io_perm_failure) {
8729		rtnl_unlock();
8730		return PCI_ERS_RESULT_DISCONNECT;
8731	}
8732
8733	if (netif_running(dev)) {
8734		bnx2_netif_stop(bp, true);
8735		del_timer_sync(&bp->timer);
8736		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8737	}
8738
8739	pci_disable_device(pdev);
8740	rtnl_unlock();
8741
8742	/* Request a slot reset. */
8743	return PCI_ERS_RESULT_NEED_RESET;
8744}
8745
8746/**
8747 * bnx2_io_slot_reset - called after the pci bus has been reset.
8748 * @pdev: Pointer to PCI device
8749 *
8750 * Restart the card from scratch, as if from a cold-boot.
8751 */
8752static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8753{
8754	struct net_device *dev = pci_get_drvdata(pdev);
8755	struct bnx2 *bp = netdev_priv(dev);
8756	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8757	int err = 0;
8758
8759	rtnl_lock();
8760	if (pci_enable_device(pdev)) {
8761		dev_err(&pdev->dev,
8762			"Cannot re-enable PCI device after reset\n");
8763	} else {
8764		pci_set_master(pdev);
8765		pci_restore_state(pdev);
8766		pci_save_state(pdev);
8767
8768		if (netif_running(dev))
8769			err = bnx2_init_nic(bp, 1);
8770
8771		if (!err)
8772			result = PCI_ERS_RESULT_RECOVERED;
8773	}
8774
8775	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
8776		bnx2_napi_enable(bp);
8777		dev_close(dev);
8778	}
8779	rtnl_unlock();
8780
8781	return result;
8785}
8786
8787/**
8788 * bnx2_io_resume - called when traffic can start flowing again.
8789 * @pdev: Pointer to PCI device
8790 *
8791 * This callback is called when the error recovery driver tells us that
8792 * it's OK to resume normal operation.
8793 */
8794static void bnx2_io_resume(struct pci_dev *pdev)
8795{
8796	struct net_device *dev = pci_get_drvdata(pdev);
8797	struct bnx2 *bp = netdev_priv(dev);
8798
8799	rtnl_lock();
8800	if (netif_running(dev))
8801		bnx2_netif_start(bp, true);
8802
8803	netif_device_attach(dev);
8804	rtnl_unlock();
8805}
8806
8807static void bnx2_shutdown(struct pci_dev *pdev)
8808{
8809	struct net_device *dev = pci_get_drvdata(pdev);
8810	struct bnx2 *bp;
8811
8812	if (!dev)
8813		return;
8814
8815	bp = netdev_priv(dev);
8816	if (!bp)
8817		return;
8818
8819	rtnl_lock();
8820	if (netif_running(dev))
8821		dev_close(bp->dev);
8822
8823	if (system_state == SYSTEM_POWER_OFF)
8824		bnx2_set_power_state(bp, PCI_D3hot);
8825
8826	rtnl_unlock();
8827}
8828
8829static const struct pci_error_handlers bnx2_err_handler = {
8830	.error_detected	= bnx2_io_error_detected,
8831	.slot_reset	= bnx2_io_slot_reset,
8832	.resume		= bnx2_io_resume,
8833};
8834
8835static struct pci_driver bnx2_pci_driver = {
8836	.name		= DRV_MODULE_NAME,
8837	.id_table	= bnx2_pci_tbl,
8838	.probe		= bnx2_init_one,
8839	.remove		= bnx2_remove_one,
8840	.driver.pm	= BNX2_PM_OPS,
8841	.err_handler	= &bnx2_err_handler,
8842	.shutdown	= bnx2_shutdown,
8843};
8844
8845module_pci_driver(bnx2_pci_driver);