   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * QLogic QLA3xxx NIC HBA Driver
   4 * Copyright (c)  2003-2006 QLogic Corporation
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/types.h>
  11#include <linux/module.h>
  12#include <linux/list.h>
  13#include <linux/pci.h>
  14#include <linux/dma-mapping.h>
  15#include <linux/sched.h>
  16#include <linux/slab.h>
  17#include <linux/dmapool.h>
  18#include <linux/mempool.h>
  19#include <linux/spinlock.h>
  20#include <linux/kthread.h>
  21#include <linux/interrupt.h>
  22#include <linux/errno.h>
  23#include <linux/ioport.h>
  24#include <linux/ip.h>
  25#include <linux/in.h>
  26#include <linux/if_arp.h>
  27#include <linux/if_ether.h>
  28#include <linux/netdevice.h>
  29#include <linux/etherdevice.h>
  30#include <linux/ethtool.h>
  31#include <linux/skbuff.h>
  32#include <linux/rtnetlink.h>
  33#include <linux/if_vlan.h>
  34#include <linux/delay.h>
  35#include <linux/mm.h>
  36#include <linux/prefetch.h>
  37
  38#include "qla3xxx.h"
  39
  40#define DRV_NAME	"qla3xxx"
  41#define DRV_STRING	"QLogic ISP3XXX Network Driver"
  42#define DRV_VERSION	"v2.03.00-k5"
  43
  44static const char ql3xxx_driver_name[] = DRV_NAME;
  45static const char ql3xxx_driver_version[] = DRV_VERSION;
  46
  47#define TIMED_OUT_MSG							\
  48"Timed out waiting for management port to get free before issuing command\n"
  49
  50MODULE_AUTHOR("QLogic Corporation");
  51MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
  52MODULE_LICENSE("GPL");
  53MODULE_VERSION(DRV_VERSION);
  54
  55static const u32 default_msg
  56    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
  57    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
  58
  59static int debug = -1;		/* defaults above */
  60module_param(debug, int, 0);
  61MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  62
  63static int msi;
  64module_param(msi, int, 0);
  65MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
  66
  67static const struct pci_device_id ql3xxx_pci_tbl[] = {
  68	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
  69	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
  70	/* required last entry */
  71	{0,}
  72};
  73
  74MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
  75
  76/*
   77 *  These are the known PHYs which are used
  78 */
  79enum PHY_DEVICE_TYPE {
   80	PHY_TYPE_UNKNOWN = 0,
   81	PHY_VITESSE_VSC8211,
   82	PHY_AGERE_ET1011C,
   83	MAX_PHY_DEV_TYPES
  84};
  85
  86struct PHY_DEVICE_INFO {
  87	const enum PHY_DEVICE_TYPE	phyDevice;
  88	const u32		phyIdOUI;
  89	const u16		phyIdModel;
  90	const char		*name;
  91};
  92
  93static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
  94	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
  95	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
  96	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
  97};
  98
  99
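/*
 * Hardware semaphore helpers.  The semaphore register packs an update
 * mask in its upper 16 bits and the ownership bits in the lower 16:
 * writing (sem_mask | sem_bits) and reading the register back shows
 * whether the requested bits actually latched.  ql_sem_spinlock()
 * retries about once a second for three seconds before giving up.
 */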
 100/*
 101 * Caller must take hw_lock.
 102 */
 103static int ql_sem_spinlock(struct ql3_adapter *qdev,
 104			    u32 sem_mask, u32 sem_bits)
 105{
 106	struct ql3xxx_port_registers __iomem *port_regs =
 107		qdev->mem_map_registers;
 108	u32 value;
 109	unsigned int seconds = 3;
 110
 111	do {
 112		writel((sem_mask | sem_bits),
 113		       &port_regs->CommonRegs.semaphoreReg);
 114		value = readl(&port_regs->CommonRegs.semaphoreReg);
 115		if ((value & (sem_mask >> 16)) == sem_bits)
 116			return 0;
 117		mdelay(1000);
 118	} while (--seconds);
 119	return -1;
 120}
 121
 122static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 123{
 124	struct ql3xxx_port_registers __iomem *port_regs =
 125		qdev->mem_map_registers;
 126	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
 127	readl(&port_regs->CommonRegs.semaphoreReg);
 128}
 129
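/*
 * Single, non-blocking attempt to grab a hardware semaphore.
 * Returns non-zero if the requested bits were latched.
 */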
 130static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 131{
 132	struct ql3xxx_port_registers __iomem *port_regs =
 133		qdev->mem_map_registers;
 134	u32 value;
 135
 136	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
 137	value = readl(&port_regs->CommonRegs.semaphoreReg);
 138	return ((value & (sem_mask >> 16)) == sem_bits);
 139}
 140
 141/*
 142 * Caller holds hw_lock.
 143 */
 144static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 145{
 146	int i = 0;
 147
 148	do {
 149		if (ql_sem_lock(qdev,
 150				QL_DRVR_SEM_MASK,
 151				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
 152				 * 2) << 1)) {
 153			netdev_printk(KERN_DEBUG, qdev->ndev,
 154				      "driver lock acquired\n");
 155			return 1;
 156		}
 157		mdelay(1000);
 158	} while (++i < 10);
 159
 160	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
 161	return 0;
 162}
 163
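/*
 * Most port registers are windowed into pages selected through
 * ispControlStatus.  The accessors below switch to the required page
 * when necessary; the _l variants take hw_lock around the access
 * themselves.
 */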
 164static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
 165{
 166	struct ql3xxx_port_registers __iomem *port_regs =
 167		qdev->mem_map_registers;
 168
 169	writel(((ISP_CONTROL_NP_MASK << 16) | page),
 170			&port_regs->CommonRegs.ispControlStatus);
 171	readl(&port_regs->CommonRegs.ispControlStatus);
 172	qdev->current_page = page;
 173}
 174
 175static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 176{
 177	u32 value;
 178	unsigned long hw_flags;
 179
 180	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 181	value = readl(reg);
 182	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 183
 184	return value;
 185}
 186
 187static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 188{
 189	return readl(reg);
 190}
 191
 192static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 193{
 194	u32 value;
 195	unsigned long hw_flags;
 196
 197	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 198
 199	if (qdev->current_page != 0)
 200		ql_set_register_page(qdev, 0);
 201	value = readl(reg);
 202
 203	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 204	return value;
 205}
 206
 207static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
 208{
 209	if (qdev->current_page != 0)
 210		ql_set_register_page(qdev, 0);
 211	return readl(reg);
 212}
 213
 214static void ql_write_common_reg_l(struct ql3_adapter *qdev,
 215				u32 __iomem *reg, u32 value)
 216{
 217	unsigned long hw_flags;
 218
 219	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 220	writel(value, reg);
 221	readl(reg);
 222	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 223}
 224
 225static void ql_write_common_reg(struct ql3_adapter *qdev,
 226				u32 __iomem *reg, u32 value)
 227{
 228	writel(value, reg);
 229	readl(reg);
 230}
 231
 232static void ql_write_nvram_reg(struct ql3_adapter *qdev,
 233				u32 __iomem *reg, u32 value)
 234{
 235	writel(value, reg);
 236	readl(reg);
 237	udelay(1);
 238}
 239
 240static void ql_write_page0_reg(struct ql3_adapter *qdev,
 241			       u32 __iomem *reg, u32 value)
 242{
 243	if (qdev->current_page != 0)
 244		ql_set_register_page(qdev, 0);
 245	writel(value, reg);
 246	readl(reg);
 247}
 248
 249/*
 250 * Caller holds hw_lock. Only called during init.
 251 */
 252static void ql_write_page1_reg(struct ql3_adapter *qdev,
 253			       u32 __iomem *reg, u32 value)
 254{
 255	if (qdev->current_page != 1)
 256		ql_set_register_page(qdev, 1);
 257	writel(value, reg);
 258	readl(reg);
 259}
 260
 261/*
 262 * Caller holds hw_lock. Only called during init.
 263 */
 264static void ql_write_page2_reg(struct ql3_adapter *qdev,
 265			       u32 __iomem *reg, u32 value)
 266{
 267	if (qdev->current_page != 2)
 268		ql_set_register_page(qdev, 2);
 269	writel(value, reg);
 270	readl(reg);
 271}
 272
 273static void ql_disable_interrupts(struct ql3_adapter *qdev)
 274{
 275	struct ql3xxx_port_registers __iomem *port_regs =
 276		qdev->mem_map_registers;
 277
 278	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 279			    (ISP_IMR_ENABLE_INT << 16));
 280
 281}
 282
 283static void ql_enable_interrupts(struct ql3_adapter *qdev)
 284{
 285	struct ql3xxx_port_registers __iomem *port_regs =
 286		qdev->mem_map_registers;
 287
 288	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 289			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
 290
 291}
 292
 293static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 294					    struct ql_rcv_buf_cb *lrg_buf_cb)
 295{
 296	dma_addr_t map;
 297	int err;
 298	lrg_buf_cb->next = NULL;
 299
 300	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
 301		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
 302	} else {
 303		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
 304		qdev->lrg_buf_free_tail = lrg_buf_cb;
 305	}
 306
 307	if (!lrg_buf_cb->skb) {
 308		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
 309						   qdev->lrg_buffer_len);
 310		if (unlikely(!lrg_buf_cb->skb)) {
 311			qdev->lrg_buf_skb_check++;
 312		} else {
 313			/*
  314			 * We save some space to copy the ethhdr from the
  315			 * first buffer
 316			 */
 317			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
 318			map = dma_map_single(&qdev->pdev->dev,
 319					     lrg_buf_cb->skb->data,
 320					     qdev->lrg_buffer_len - QL_HEADER_SPACE,
 321					     DMA_FROM_DEVICE);
 322			err = dma_mapping_error(&qdev->pdev->dev, map);
 323			if (err) {
 324				netdev_err(qdev->ndev,
 325					   "PCI mapping failed with error: %d\n",
 326					   err);
 327				dev_kfree_skb(lrg_buf_cb->skb);
 328				lrg_buf_cb->skb = NULL;
 329
 330				qdev->lrg_buf_skb_check++;
 331				return;
 332			}
 333
 334			lrg_buf_cb->buf_phy_addr_low =
 335			    cpu_to_le32(LS_64BITS(map));
 336			lrg_buf_cb->buf_phy_addr_high =
 337			    cpu_to_le32(MS_64BITS(map));
 338			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
 339			dma_unmap_len_set(lrg_buf_cb, maplen,
 340					  qdev->lrg_buffer_len -
 341					  QL_HEADER_SPACE);
 342		}
 343	}
 344
 345	qdev->lrg_buf_free_count++;
 346}
 347
 348static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
 349							   *qdev)
 350{
 351	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
 352
 353	if (lrg_buf_cb != NULL) {
 354		qdev->lrg_buf_free_head = lrg_buf_cb->next;
 355		if (qdev->lrg_buf_free_head == NULL)
 356			qdev->lrg_buf_free_tail = NULL;
 357		qdev->lrg_buf_free_count--;
 358	}
 359
 360	return lrg_buf_cb;
 361}
 362
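/*
 * The fm93c56a_* routines below bit-bang the FM93C56A serial EEPROM
 * through serialPortInterfaceReg: chip select, clock and data-out are
 * driven one transition at a time, and data-in is sampled after each
 * clock cycle on reads.
 */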
 363static u32 addrBits = EEPROM_NO_ADDR_BITS;
 364static u32 dataBits = EEPROM_NO_DATA_BITS;
 365
 366static void fm93c56a_deselect(struct ql3_adapter *qdev);
 367static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
 368			    unsigned short *value);
 369
 370/*
 371 * Caller holds hw_lock.
 372 */
 373static void fm93c56a_select(struct ql3_adapter *qdev)
 374{
 375	struct ql3xxx_port_registers __iomem *port_regs =
 376			qdev->mem_map_registers;
 377	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 378
 379	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
 380	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 381}
 382
 383/*
 384 * Caller holds hw_lock.
 385 */
 386static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 387{
 388	int i;
 389	u32 mask;
 390	u32 dataBit;
 391	u32 previousBit;
 392	struct ql3xxx_port_registers __iomem *port_regs =
 393			qdev->mem_map_registers;
 394	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 395
 396	/* Clock in a zero, then do the start bit */
 397	ql_write_nvram_reg(qdev, spir,
 398			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 399			    AUBURN_EEPROM_DO_1));
 400	ql_write_nvram_reg(qdev, spir,
 401			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 402			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
 403	ql_write_nvram_reg(qdev, spir,
 404			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 405			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
 406
 407	mask = 1 << (FM93C56A_CMD_BITS - 1);
 408	/* Force the previous data bit to be different */
 409	previousBit = 0xffff;
 410	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
 411		dataBit = (cmd & mask)
 412			? AUBURN_EEPROM_DO_1
 413			: AUBURN_EEPROM_DO_0;
 414		if (previousBit != dataBit) {
 415			/* If the bit changed, change the DO state to match */
 416			ql_write_nvram_reg(qdev, spir,
 417					   (ISP_NVRAM_MASK |
 418					    qdev->eeprom_cmd_data | dataBit));
 419			previousBit = dataBit;
 420		}
 421		ql_write_nvram_reg(qdev, spir,
 422				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 423				    dataBit | AUBURN_EEPROM_CLK_RISE));
 424		ql_write_nvram_reg(qdev, spir,
 425				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 426				    dataBit | AUBURN_EEPROM_CLK_FALL));
 427		cmd = cmd << 1;
 428	}
 429
 430	mask = 1 << (addrBits - 1);
 431	/* Force the previous data bit to be different */
 432	previousBit = 0xffff;
 433	for (i = 0; i < addrBits; i++) {
 434		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
 435			: AUBURN_EEPROM_DO_0;
 436		if (previousBit != dataBit) {
 437			/*
 438			 * If the bit changed, then change the DO state to
 439			 * match
 440			 */
 441			ql_write_nvram_reg(qdev, spir,
 442					   (ISP_NVRAM_MASK |
 443					    qdev->eeprom_cmd_data | dataBit));
 444			previousBit = dataBit;
 445		}
 446		ql_write_nvram_reg(qdev, spir,
 447				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 448				    dataBit | AUBURN_EEPROM_CLK_RISE));
 449		ql_write_nvram_reg(qdev, spir,
 450				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 451				    dataBit | AUBURN_EEPROM_CLK_FALL));
 452		eepromAddr = eepromAddr << 1;
 453	}
 454}
 455
 456/*
 457 * Caller holds hw_lock.
 458 */
 459static void fm93c56a_deselect(struct ql3_adapter *qdev)
 460{
 461	struct ql3xxx_port_registers __iomem *port_regs =
 462			qdev->mem_map_registers;
 463	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 464
 465	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
 466	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 467}
 468
 469/*
 470 * Caller holds hw_lock.
 471 */
 472static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
 473{
 474	int i;
 475	u32 data = 0;
 476	u32 dataBit;
 477	struct ql3xxx_port_registers __iomem *port_regs =
 478			qdev->mem_map_registers;
 479	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
 480
 481	/* Read the data bits */
 482	/* The first bit is a dummy.  Clock right over it. */
 483	for (i = 0; i < dataBits; i++) {
 484		ql_write_nvram_reg(qdev, spir,
 485				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 486				   AUBURN_EEPROM_CLK_RISE);
 487		ql_write_nvram_reg(qdev, spir,
 488				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 489				   AUBURN_EEPROM_CLK_FALL);
 490		dataBit = (ql_read_common_reg(qdev, spir) &
 491			   AUBURN_EEPROM_DI_1) ? 1 : 0;
 492		data = (data << 1) | dataBit;
 493	}
 494	*value = (u16)data;
 495}
 496
 497/*
 498 * Caller holds hw_lock.
 499 */
 500static void eeprom_readword(struct ql3_adapter *qdev,
 501			    u32 eepromAddr, unsigned short *value)
 502{
 503	fm93c56a_select(qdev);
 504	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
 505	fm93c56a_datain(qdev, value);
 506	fm93c56a_deselect(qdev);
 507}
 508
 509static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
 510{
 511	__le16 *p = (__le16 *)ndev->dev_addr;
 512	p[0] = cpu_to_le16(addr[0]);
 513	p[1] = cpu_to_le16(addr[1]);
 514	p[2] = cpu_to_le16(addr[2]);
 515}
 516
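/*
 * Read the entire EEPROM image into qdev->nvram_data under the NVRAM
 * semaphore and verify its additive checksum (the 16-bit sum of all
 * words must be zero).
 */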
 517static int ql_get_nvram_params(struct ql3_adapter *qdev)
 518{
 519	u16 *pEEPROMData;
 520	u16 checksum = 0;
 521	u32 index;
 522	unsigned long hw_flags;
 523
 524	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 525
 526	pEEPROMData = (u16 *)&qdev->nvram_data;
 527	qdev->eeprom_cmd_data = 0;
 528	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
 529			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
 530			 2) << 10)) {
 531		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
 532		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 533		return -1;
 534	}
 535
 536	for (index = 0; index < EEPROM_SIZE; index++) {
 537		eeprom_readword(qdev, index, pEEPROMData);
 538		checksum += *pEEPROMData;
 539		pEEPROMData++;
 540	}
 541	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 542
 543	if (checksum != 0) {
 544		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
 545			   checksum);
 546		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 547		return -1;
 548	}
 549
 550	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 551	return checksum;
 552}
 553
 554static const u32 PHYAddr[2] = {
 555	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
 556};
 557
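/*
 * MII management helpers.  The *_ex variants address an explicit PHY
 * while the plain variants use qdev->PHYAddr.  Auto-scan is turned off
 * around each access; the *_ex variants restore it only if it was
 * previously enabled.
 */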
 558static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
 559{
 560	struct ql3xxx_port_registers __iomem *port_regs =
 561			qdev->mem_map_registers;
 562	u32 temp;
 563	int count = 1000;
 564
 565	while (count) {
 566		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
 567		if (!(temp & MAC_MII_STATUS_BSY))
 568			return 0;
 569		udelay(10);
 570		count--;
 571	}
 572	return -1;
 573}
 574
 575static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
 576{
 577	struct ql3xxx_port_registers __iomem *port_regs =
 578			qdev->mem_map_registers;
 579	u32 scanControl;
 580
 581	if (qdev->numPorts > 1) {
 582		/* Auto scan will cycle through multiple ports */
 583		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
 584	} else {
 585		scanControl = MAC_MII_CONTROL_SC;
 586	}
 587
 588	/*
  589	 * Scan register 1 of the PHY/PETBI.
  590	 * Set up to scan both devices.
  591	 * The autoscan starts from the first register and completes
  592	 * the last one before rolling over to the first.
 593	 */
 594	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 595			   PHYAddr[0] | MII_SCAN_REGISTER);
 596
 597	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 598			   (scanControl) |
 599			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
 600}
 601
 602static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
 603{
 604	u8 ret;
 605	struct ql3xxx_port_registers __iomem *port_regs =
 606					qdev->mem_map_registers;
 607
 608	/* See if scan mode is enabled before we turn it off */
 609	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
 610	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
 611		/* Scan is enabled */
 612		ret = 1;
 613	} else {
 614		/* Scan is disabled */
 615		ret = 0;
 616	}
 617
 618	/*
 619	 * When disabling scan mode you must first change the MII register
 620	 * address
 621	 */
 622	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 623			   PHYAddr[0] | MII_SCAN_REGISTER);
 624
 625	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 626			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
 627			     MAC_MII_CONTROL_RC) << 16));
 628
 629	return ret;
 630}
 631
 632static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
 633			       u16 regAddr, u16 value, u32 phyAddr)
 634{
 635	struct ql3xxx_port_registers __iomem *port_regs =
 636			qdev->mem_map_registers;
 637	u8 scanWasEnabled;
 638
 639	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 640
 641	if (ql_wait_for_mii_ready(qdev)) {
 642		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 643		return -1;
 644	}
 645
 646	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 647			   phyAddr | regAddr);
 648
 649	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
 650
 651	/* Wait for write to complete 9/10/04 SJP */
 652	if (ql_wait_for_mii_ready(qdev)) {
 653		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 654		return -1;
 655	}
 656
 657	if (scanWasEnabled)
 658		ql_mii_enable_scan_mode(qdev);
 659
 660	return 0;
 661}
 662
 663static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
 664			      u16 *value, u32 phyAddr)
 665{
 666	struct ql3xxx_port_registers __iomem *port_regs =
 667			qdev->mem_map_registers;
 668	u8 scanWasEnabled;
 669	u32 temp;
 670
 671	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 672
 673	if (ql_wait_for_mii_ready(qdev)) {
 674		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 675		return -1;
 676	}
 677
 678	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 679			   phyAddr | regAddr);
 680
 681	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 682			   (MAC_MII_CONTROL_RC << 16));
 683
 684	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 685			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
 686
 687	/* Wait for the read to complete */
 688	if (ql_wait_for_mii_ready(qdev)) {
 689		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 690		return -1;
 691	}
 692
 693	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
 694	*value = (u16) temp;
 695
 696	if (scanWasEnabled)
 697		ql_mii_enable_scan_mode(qdev);
 698
 699	return 0;
 700}
 701
 702static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
 703{
 704	struct ql3xxx_port_registers __iomem *port_regs =
 705			qdev->mem_map_registers;
 706
 707	ql_mii_disable_scan_mode(qdev);
 708
 709	if (ql_wait_for_mii_ready(qdev)) {
 710		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 711		return -1;
 712	}
 713
 714	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 715			   qdev->PHYAddr | regAddr);
 716
 717	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
 718
 719	/* Wait for write to complete. */
 720	if (ql_wait_for_mii_ready(qdev)) {
 721		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 722		return -1;
 723	}
 724
 725	ql_mii_enable_scan_mode(qdev);
 726
 727	return 0;
 728}
 729
 730static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
 731{
 732	u32 temp;
 733	struct ql3xxx_port_registers __iomem *port_regs =
 734			qdev->mem_map_registers;
 735
 736	ql_mii_disable_scan_mode(qdev);
 737
 738	if (ql_wait_for_mii_ready(qdev)) {
 739		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 740		return -1;
 741	}
 742
 743	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
 744			   qdev->PHYAddr | regAddr);
 745
 746	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 747			   (MAC_MII_CONTROL_RC << 16));
 748
 749	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
 750			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
 751
 752	/* Wait for the read to complete */
 753	if (ql_wait_for_mii_ready(qdev)) {
 754		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
 755		return -1;
 756	}
 757
 758	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
 759	*value = (u16) temp;
 760
 761	ql_mii_enable_scan_mode(qdev);
 762
 763	return 0;
 764}
 765
 766static void ql_petbi_reset(struct ql3_adapter *qdev)
 767{
 768	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
 769}
 770
 771static void ql_petbi_start_neg(struct ql3_adapter *qdev)
 772{
 773	u16 reg;
 774
 775	/* Enable Auto-negotiation sense */
 776	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
 777	reg |= PETBI_TBI_AUTO_SENSE;
 778	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
 779
 780	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
 781			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
 782
 783	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
 784			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
 785			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
 786
 787}
 788
 789static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
 790{
 791	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
 792			    PHYAddr[qdev->mac_index]);
 793}
 794
 795static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
 796{
 797	u16 reg;
 798
 799	/* Enable Auto-negotiation sense */
 800	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
 801			   PHYAddr[qdev->mac_index]);
 802	reg |= PETBI_TBI_AUTO_SENSE;
 803	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
 804			    PHYAddr[qdev->mac_index]);
 805
 806	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
 807			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
 808			    PHYAddr[qdev->mac_index]);
 809
 810	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
 811			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
 812			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
 813			    PHYAddr[qdev->mac_index]);
 814}
 815
 816static void ql_petbi_init(struct ql3_adapter *qdev)
 817{
 818	ql_petbi_reset(qdev);
 819	ql_petbi_start_neg(qdev);
 820}
 821
 822static void ql_petbi_init_ex(struct ql3_adapter *qdev)
 823{
 824	ql_petbi_reset_ex(qdev);
 825	ql_petbi_start_neg_ex(qdev);
 826}
 827
 828static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
 829{
 830	u16 reg;
 831
 832	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
 833		return 0;
 834
 835	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
 836}
 837
 838static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
 839{
 840	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
 841	/* power down device bit 11 = 1 */
 842	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
 843	/* enable diagnostic mode bit 2 = 1 */
 844	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
 845	/* 1000MB amplitude adjust (see Agere errata) */
 846	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
 847	/* 1000MB amplitude adjust (see Agere errata) */
 848	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
 849	/* 100MB amplitude adjust (see Agere errata) */
 850	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
 851	/* 100MB amplitude adjust (see Agere errata) */
 852	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
 853	/* 10MB amplitude adjust (see Agere errata) */
 854	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
 855	/* 10MB amplitude adjust (see Agere errata) */
 856	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
 857	/* point to hidden reg 0x2806 */
 858	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
 859	/* Write new PHYAD w/bit 5 set */
 860	ql_mii_write_reg_ex(qdev, 0x11,
 861			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
 862	/*
 863	 * Disable diagnostic mode bit 2 = 0
 864	 * Power up device bit 11 = 0
 865	 * Link up (on) and activity (blink)
 866	 */
 867	ql_mii_write_reg(qdev, 0x12, 0x840a);
 868	ql_mii_write_reg(qdev, 0x00, 0x1140);
 869	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
 870}
 871
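/*
 * Identify the PHY from its two MII ID registers.  The OUI is split
 * across both registers and the model number comes from register 1;
 * the result is looked up in the PHY_DEVICES[] table above.
 */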
 872static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
 873				       u16 phyIdReg0, u16 phyIdReg1)
 874{
 875	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
 876	u32   oui;
 877	u16   model;
 878	int i;
 879
 880	if (phyIdReg0 == 0xffff)
 881		return result;
 882
 883	if (phyIdReg1 == 0xffff)
 884		return result;
 885
 886	/* oui is split between two registers */
 887	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
 888
 889	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
 890
 891	/* Scan table for this PHY */
 892	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
 893		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
 894		    (model == PHY_DEVICES[i].phyIdModel)) {
 895			netdev_info(qdev->ndev, "Phy: %s\n",
 896				    PHY_DEVICES[i].name);
 897			result = PHY_DEVICES[i].phyDevice;
 898			break;
 899		}
 900	}
 901
 902	return result;
 903}
 904
 905static int ql_phy_get_speed(struct ql3_adapter *qdev)
 906{
 907	u16 reg;
 908
 909	switch (qdev->phyType) {
 910	case PHY_AGERE_ET1011C: {
 911		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
 912			return 0;
 913
 914		reg = (reg >> 8) & 3;
 915		break;
 916	}
 917	default:
 918		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 919			return 0;
 920
 921		reg = (((reg & 0x18) >> 3) & 3);
 922	}
 923
 924	switch (reg) {
 925	case 2:
 926		return SPEED_1000;
 927	case 1:
 928		return SPEED_100;
 929	case 0:
 930		return SPEED_10;
 931	default:
 932		return -1;
 933	}
 934}
 935
 936static int ql_is_full_dup(struct ql3_adapter *qdev)
 937{
 938	u16 reg;
 939
 940	switch (qdev->phyType) {
 941	case PHY_AGERE_ET1011C: {
 942		if (ql_mii_read_reg(qdev, 0x1A, &reg))
 943			return 0;
 944
 945		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
 946	}
 947	case PHY_VITESSE_VSC8211:
 948	default: {
 949		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
 950			return 0;
 951		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
 952	}
 953	}
 954}
 955
 956static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
 957{
 958	u16 reg;
 959
 960	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
 961		return 0;
 962
 963	return (reg & PHY_NEG_PAUSE) != 0;
 964}
 965
 966static int PHY_Setup(struct ql3_adapter *qdev)
 967{
 968	u16   reg1;
 969	u16   reg2;
 970	bool  agereAddrChangeNeeded = false;
 971	u32 miiAddr = 0;
 972	int err;
 973
  974	/*  Determine the PHY we are using by reading the IDs */
 975	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
 976	if (err != 0) {
 977		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
 978		return err;
 979	}
 980
 981	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
 982	if (err != 0) {
 983		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
 984		return err;
 985	}
 986
  987	/*  Check if we have an Agere PHY */
 988	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
 989
  990		/* Determine which MII address we should be using,
  991		   based on the index of the card */
 992		if (qdev->mac_index == 0)
 993			miiAddr = MII_AGERE_ADDR_1;
 994		else
 995			miiAddr = MII_AGERE_ADDR_2;
 996
 997		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
 998		if (err != 0) {
 999			netdev_err(qdev->ndev,
1000				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
1001			return err;
1002		}
1003
1004		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1005		if (err != 0) {
1006			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
1007			return err;
1008		}
1009
1010		/*  We need to remember to initialize the Agere PHY */
1011		agereAddrChangeNeeded = true;
1012	}
1013
1014	/*  Determine the particular PHY we have on board to apply
1015	    PHY specific initializations */
1016	qdev->phyType = getPhyType(qdev, reg1, reg2);
1017
1018	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1019		/* need this here so address gets changed */
1020		phyAgereSpecificInit(qdev, miiAddr);
1021	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1022		netdev_err(qdev->ndev, "PHY is unknown\n");
1023		return -EIO;
1024	}
1025
1026	return 0;
1027}
1028
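/*
 * The per-port MAC configuration registers use a write-enable mask in
 * the upper 16 bits.  Each helper below sets or clears one control bit
 * by writing the bit value in the low half and its mask in the high
 * half.
 */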
1029/*
1030 * Caller holds hw_lock.
1031 */
1032static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1033{
1034	struct ql3xxx_port_registers __iomem *port_regs =
1035			qdev->mem_map_registers;
1036	u32 value;
1037
1038	if (enable)
1039		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
1040	else
1041		value = (MAC_CONFIG_REG_PE << 16);
1042
1043	if (qdev->mac_index)
1044		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1045	else
1046		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1047}
1048
1049/*
1050 * Caller holds hw_lock.
1051 */
1052static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1053{
1054	struct ql3xxx_port_registers __iomem *port_regs =
1055			qdev->mem_map_registers;
1056	u32 value;
1057
1058	if (enable)
1059		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
1060	else
1061		value = (MAC_CONFIG_REG_SR << 16);
1062
1063	if (qdev->mac_index)
1064		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1065	else
1066		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1067}
1068
1069/*
1070 * Caller holds hw_lock.
1071 */
1072static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1073{
1074	struct ql3xxx_port_registers __iomem *port_regs =
1075			qdev->mem_map_registers;
1076	u32 value;
1077
1078	if (enable)
1079		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
1080	else
1081		value = (MAC_CONFIG_REG_GM << 16);
1082
1083	if (qdev->mac_index)
1084		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1085	else
1086		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1087}
1088
1089/*
1090 * Caller holds hw_lock.
1091 */
1092static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1093{
1094	struct ql3xxx_port_registers __iomem *port_regs =
1095			qdev->mem_map_registers;
1096	u32 value;
1097
1098	if (enable)
1099		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
1100	else
1101		value = (MAC_CONFIG_REG_FD << 16);
1102
1103	if (qdev->mac_index)
1104		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1105	else
1106		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1107}
1108
1109/*
1110 * Caller holds hw_lock.
1111 */
1112static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1113{
1114	struct ql3xxx_port_registers __iomem *port_regs =
1115			qdev->mem_map_registers;
1116	u32 value;
1117
1118	if (enable)
1119		value =
1120		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1121		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1122	else
1123		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1124
1125	if (qdev->mac_index)
1126		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1127	else
1128		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1129}
1130
1131/*
1132 * Caller holds hw_lock.
1133 */
1134static int ql_is_fiber(struct ql3_adapter *qdev)
1135{
1136	struct ql3xxx_port_registers __iomem *port_regs =
1137			qdev->mem_map_registers;
1138	u32 bitToCheck = 0;
1139	u32 temp;
1140
1141	switch (qdev->mac_index) {
1142	case 0:
1143		bitToCheck = PORT_STATUS_SM0;
1144		break;
1145	case 1:
1146		bitToCheck = PORT_STATUS_SM1;
1147		break;
1148	}
1149
1150	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1151	return (temp & bitToCheck) != 0;
1152}
1153
1154static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1155{
1156	u16 reg;
1157	ql_mii_read_reg(qdev, 0x00, &reg);
1158	return (reg & 0x1000) != 0;
1159}
1160
1161/*
1162 * Caller holds hw_lock.
1163 */
1164static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1165{
1166	struct ql3xxx_port_registers __iomem *port_regs =
1167			qdev->mem_map_registers;
1168	u32 bitToCheck = 0;
1169	u32 temp;
1170
1171	switch (qdev->mac_index) {
1172	case 0:
1173		bitToCheck = PORT_STATUS_AC0;
1174		break;
1175	case 1:
1176		bitToCheck = PORT_STATUS_AC1;
1177		break;
1178	}
1179
1180	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1181	if (temp & bitToCheck) {
1182		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
1183		return 1;
1184	}
1185	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
1186	return 0;
1187}
1188
1189/*
1190 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
1191 */
1192static int ql_is_neg_pause(struct ql3_adapter *qdev)
1193{
1194	if (ql_is_fiber(qdev))
1195		return ql_is_petbi_neg_pause(qdev);
1196	else
1197		return ql_is_phy_neg_pause(qdev);
1198}
1199
1200static int ql_auto_neg_error(struct ql3_adapter *qdev)
1201{
1202	struct ql3xxx_port_registers __iomem *port_regs =
1203			qdev->mem_map_registers;
1204	u32 bitToCheck = 0;
1205	u32 temp;
1206
1207	switch (qdev->mac_index) {
1208	case 0:
1209		bitToCheck = PORT_STATUS_AE0;
1210		break;
1211	case 1:
1212		bitToCheck = PORT_STATUS_AE1;
1213		break;
1214	}
1215	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1216	return (temp & bitToCheck) != 0;
1217}
1218
1219static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1220{
1221	if (ql_is_fiber(qdev))
1222		return SPEED_1000;
1223	else
1224		return ql_phy_get_speed(qdev);
1225}
1226
1227static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1228{
1229	if (ql_is_fiber(qdev))
1230		return 1;
1231	else
1232		return ql_is_full_dup(qdev);
1233}
1234
1235/*
1236 * Caller holds hw_lock.
1237 */
1238static int ql_link_down_detect(struct ql3_adapter *qdev)
1239{
1240	struct ql3xxx_port_registers __iomem *port_regs =
1241			qdev->mem_map_registers;
1242	u32 bitToCheck = 0;
1243	u32 temp;
1244
1245	switch (qdev->mac_index) {
1246	case 0:
1247		bitToCheck = ISP_CONTROL_LINK_DN_0;
1248		break;
1249	case 1:
1250		bitToCheck = ISP_CONTROL_LINK_DN_1;
1251		break;
1252	}
1253
1254	temp =
1255	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1256	return (temp & bitToCheck) != 0;
1257}
1258
1259/*
1260 * Caller holds hw_lock.
1261 */
1262static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1263{
1264	struct ql3xxx_port_registers __iomem *port_regs =
1265			qdev->mem_map_registers;
1266
1267	switch (qdev->mac_index) {
1268	case 0:
1269		ql_write_common_reg(qdev,
1270				    &port_regs->CommonRegs.ispControlStatus,
1271				    (ISP_CONTROL_LINK_DN_0) |
1272				    (ISP_CONTROL_LINK_DN_0 << 16));
1273		break;
1274
1275	case 1:
1276		ql_write_common_reg(qdev,
1277				    &port_regs->CommonRegs.ispControlStatus,
1278				    (ISP_CONTROL_LINK_DN_1) |
1279				    (ISP_CONTROL_LINK_DN_1 << 16));
1280		break;
1281
1282	default:
1283		return 1;
1284	}
1285
1286	return 0;
1287}
1288
1289/*
1290 * Caller holds hw_lock.
1291 */
1292static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
1293{
1294	struct ql3xxx_port_registers __iomem *port_regs =
1295			qdev->mem_map_registers;
1296	u32 bitToCheck = 0;
1297	u32 temp;
1298
1299	switch (qdev->mac_index) {
1300	case 0:
1301		bitToCheck = PORT_STATUS_F1_ENABLED;
1302		break;
1303	case 1:
1304		bitToCheck = PORT_STATUS_F3_ENABLED;
1305		break;
1306	default:
1307		break;
1308	}
1309
1310	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1311	if (temp & bitToCheck) {
1312		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1313			     "not link master\n");
1314		return 0;
1315	}
1316
1317	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
1318	return 1;
1319}
1320
1321static void ql_phy_reset_ex(struct ql3_adapter *qdev)
1322{
1323	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1324			    PHYAddr[qdev->mac_index]);
1325}
1326
1327static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
1328{
1329	u16 reg;
1330	u16 portConfiguration;
1331
1332	if (qdev->phyType == PHY_AGERE_ET1011C)
1333		ql_mii_write_reg(qdev, 0x13, 0x0000);
1334					/* turn off external loopback */
1335
1336	if (qdev->mac_index == 0)
1337		portConfiguration =
1338			qdev->nvram_data.macCfg_port0.portConfiguration;
1339	else
1340		portConfiguration =
1341			qdev->nvram_data.macCfg_port1.portConfiguration;
1342
 1343	/*  Some HBAs in the field are set to 0 and they need to
1344	    be reinterpreted with a default value */
1345	if (portConfiguration == 0)
1346		portConfiguration = PORT_CONFIG_DEFAULT;
1347
1348	/* Set the 1000 advertisements */
1349	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1350			   PHYAddr[qdev->mac_index]);
1351	reg &= ~PHY_GIG_ALL_PARAMS;
1352
1353	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
1354		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
1355			reg |= PHY_GIG_ADV_1000F;
1356		else
1357			reg |= PHY_GIG_ADV_1000H;
1358	}
1359
1360	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1361			    PHYAddr[qdev->mac_index]);
1362
1363	/* Set the 10/100 & pause negotiation advertisements */
1364	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1365			   PHYAddr[qdev->mac_index]);
1366	reg &= ~PHY_NEG_ALL_PARAMS;
1367
1368	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1369		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
1370
1371	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1372		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1373			reg |= PHY_NEG_ADV_100F;
1374
1375		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1376			reg |= PHY_NEG_ADV_10F;
1377	}
1378
1379	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1380		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
1381			reg |= PHY_NEG_ADV_100H;
1382
1383		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
1384			reg |= PHY_NEG_ADV_10H;
1385	}
1386
1387	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
1388		reg |= 1;
1389
1390	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1391			    PHYAddr[qdev->mac_index]);
1392
1393	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1394
1395	ql_mii_write_reg_ex(qdev, CONTROL_REG,
1396			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
1397			    PHYAddr[qdev->mac_index]);
1398}
1399
1400static void ql_phy_init_ex(struct ql3_adapter *qdev)
1401{
1402	ql_phy_reset_ex(qdev);
1403	PHY_Setup(qdev);
1404	ql_phy_start_neg_ex(qdev);
1405}
1406
1407/*
1408 * Caller holds hw_lock.
1409 */
1410static u32 ql_get_link_state(struct ql3_adapter *qdev)
1411{
1412	struct ql3xxx_port_registers __iomem *port_regs =
1413			qdev->mem_map_registers;
1414	u32 bitToCheck = 0;
1415	u32 temp, linkState;
1416
1417	switch (qdev->mac_index) {
1418	case 0:
1419		bitToCheck = PORT_STATUS_UP0;
1420		break;
1421	case 1:
1422		bitToCheck = PORT_STATUS_UP1;
1423		break;
1424	}
1425
1426	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1427	if (temp & bitToCheck)
1428		linkState = LS_UP;
1429	else
1430		linkState = LS_DOWN;
1431
1432	return linkState;
1433}
1434
1435static int ql_port_start(struct ql3_adapter *qdev)
1436{
1437	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1438		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1439			 2) << 7)) {
1440		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
1441		return -1;
1442	}
1443
1444	if (ql_is_fiber(qdev)) {
1445		ql_petbi_init(qdev);
1446	} else {
1447		/* Copper port */
1448		ql_phy_init_ex(qdev);
1449	}
1450
1451	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1452	return 0;
1453}
1454
1455static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1456{
1457
1458	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1459		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1460			 2) << 7))
1461		return -1;
1462
1463	if (!ql_auto_neg_error(qdev)) {
1464		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1465			/* configure the MAC */
1466			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1467				     "Configuring link\n");
1468			ql_mac_cfg_soft_reset(qdev, 1);
1469			ql_mac_cfg_gig(qdev,
1470				       (ql_get_link_speed
1471					(qdev) ==
1472					SPEED_1000));
1473			ql_mac_cfg_full_dup(qdev,
1474					    ql_is_link_full_dup
1475					    (qdev));
1476			ql_mac_cfg_pause(qdev,
1477					 ql_is_neg_pause
1478					 (qdev));
1479			ql_mac_cfg_soft_reset(qdev, 0);
1480
1481			/* enable the MAC */
1482			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1483				     "Enabling mac\n");
1484			ql_mac_enable(qdev, 1);
1485		}
1486
1487		qdev->port_link_state = LS_UP;
1488		netif_start_queue(qdev->ndev);
1489		netif_carrier_on(qdev->ndev);
1490		netif_info(qdev, link, qdev->ndev,
1491			   "Link is up at %d Mbps, %s duplex\n",
1492			   ql_get_link_speed(qdev),
1493			   ql_is_link_full_dup(qdev) ? "full" : "half");
1494
1495	} else {	/* Remote error detected */
1496
1497		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1498			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
1499				     "Remote error detected. Calling ql_port_start()\n");
1500			/*
1501			 * ql_port_start() is shared code and needs
 1502			 * to lock the PHY on its own.
1503			 */
1504			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1505			if (ql_port_start(qdev))	/* Restart port */
1506				return -1;
1507			return 0;
1508		}
1509	}
1510	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1511	return 0;
1512}
1513
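/*
 * Link state machine, run from delayed work.  The port state is
 * sampled under hw_lock, auto-negotiation results are applied when the
 * link comes up, and the adapter timer is re-armed before returning.
 */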
1514static void ql_link_state_machine_work(struct work_struct *work)
1515{
1516	struct ql3_adapter *qdev =
1517		container_of(work, struct ql3_adapter, link_state_work.work);
1518
1519	u32 curr_link_state;
1520	unsigned long hw_flags;
1521
1522	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1523
1524	curr_link_state = ql_get_link_state(qdev);
1525
1526	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1527		netif_info(qdev, link, qdev->ndev,
1528			   "Reset in progress, skip processing link state\n");
1529
1530		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1531
 1532		/* Restart timer on 1 second interval. */
1533		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1534
1535		return;
1536	}
1537
1538	switch (qdev->port_link_state) {
1539	default:
1540		if (test_bit(QL_LINK_MASTER, &qdev->flags))
1541			ql_port_start(qdev);
1542		qdev->port_link_state = LS_DOWN;
1543		fallthrough;
1544
1545	case LS_DOWN:
1546		if (curr_link_state == LS_UP) {
1547			netif_info(qdev, link, qdev->ndev, "Link is up\n");
1548			if (ql_is_auto_neg_complete(qdev))
1549				ql_finish_auto_neg(qdev);
1550
1551			if (qdev->port_link_state == LS_UP)
1552				ql_link_down_detect_clear(qdev);
1553
1554			qdev->port_link_state = LS_UP;
1555		}
1556		break;
1557
1558	case LS_UP:
1559		/*
1560		 * See if the link is currently down or went down and came
1561		 * back up
1562		 */
1563		if (curr_link_state == LS_DOWN) {
1564			netif_info(qdev, link, qdev->ndev, "Link is down\n");
1565			qdev->port_link_state = LS_DOWN;
1566		}
1567		if (ql_link_down_detect(qdev))
1568			qdev->port_link_state = LS_DOWN;
1569		break;
1570	}
1571	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1572
 1573	/* Restart timer on 1 second interval. */
1574	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
1575}
1576
1577/*
1578 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1579 */
1580static void ql_get_phy_owner(struct ql3_adapter *qdev)
1581{
1582	if (ql_this_adapter_controls_port(qdev))
1583		set_bit(QL_LINK_MASTER, &qdev->flags);
1584	else
1585		clear_bit(QL_LINK_MASTER, &qdev->flags);
1586}
1587
1588/*
1589 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1590 */
1591static void ql_init_scan_mode(struct ql3_adapter *qdev)
1592{
1593	ql_mii_enable_scan_mode(qdev);
1594
1595	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1596		if (ql_this_adapter_controls_port(qdev))
1597			ql_petbi_init_ex(qdev);
1598	} else {
1599		if (ql_this_adapter_controls_port(qdev))
1600			ql_phy_init_ex(qdev);
1601	}
1602}
1603
1604/*
1605 * MII_Setup needs to be called before taking the PHY out of reset
1606 * so that the management interface clock speed can be set properly.
1607 * It would be better if we had a way to disable MDC until after the
1608 * PHY is out of reset, but we don't have that capability.
1609 */
1610static int ql_mii_setup(struct ql3_adapter *qdev)
1611{
1612	u32 reg;
1613	struct ql3xxx_port_registers __iomem *port_regs =
1614			qdev->mem_map_registers;
1615
1616	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1617			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1618			 2) << 7))
1619		return -1;
1620
1621	if (qdev->device_id == QL3032_DEVICE_ID)
1622		ql_write_page0_reg(qdev,
1623			&port_regs->macMIIMgmtControlReg, 0x0f00000);
1624
1625	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
1626	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1627
1628	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1629			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1630
1631	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1632	return 0;
1633}
1634
1635#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
1636				 SUPPORTED_FIBRE |		\
1637				 SUPPORTED_Autoneg)
1638#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
1639				 SUPPORTED_10baseT_Full |	\
1640				 SUPPORTED_100baseT_Half |	\
1641				 SUPPORTED_100baseT_Full |	\
1642				 SUPPORTED_1000baseT_Half |	\
1643				 SUPPORTED_1000baseT_Full |	\
1644				 SUPPORTED_Autoneg |		\
1645				 SUPPORTED_TP)			\
1646
1647static u32 ql_supported_modes(struct ql3_adapter *qdev)
1648{
1649	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
1650		return SUPPORTED_OPTICAL_MODES;
1651
1652	return SUPPORTED_TP_MODES;
1653}
1654
1655static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1656{
1657	int status;
1658	unsigned long hw_flags;
1659	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1660	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1661			    (QL_RESOURCE_BITS_BASE_CODE |
1662			     (qdev->mac_index) * 2) << 7)) {
1663		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1664		return 0;
1665	}
1666	status = ql_is_auto_cfg(qdev);
1667	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1668	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1669	return status;
1670}
1671
1672static u32 ql_get_speed(struct ql3_adapter *qdev)
1673{
1674	u32 status;
1675	unsigned long hw_flags;
1676	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1677	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1678			    (QL_RESOURCE_BITS_BASE_CODE |
1679			     (qdev->mac_index) * 2) << 7)) {
1680		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1681		return 0;
1682	}
1683	status = ql_get_link_speed(qdev);
1684	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1685	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1686	return status;
1687}
1688
1689static int ql_get_full_dup(struct ql3_adapter *qdev)
1690{
1691	int status;
1692	unsigned long hw_flags;
1693	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1694	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1695			    (QL_RESOURCE_BITS_BASE_CODE |
1696			     (qdev->mac_index) * 2) << 7)) {
1697		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1698		return 0;
1699	}
1700	status = ql_is_link_full_dup(qdev);
1701	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1702	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1703	return status;
1704}
1705
1706static int ql_get_link_ksettings(struct net_device *ndev,
1707				 struct ethtool_link_ksettings *cmd)
1708{
1709	struct ql3_adapter *qdev = netdev_priv(ndev);
1710	u32 supported, advertising;
1711
1712	supported = ql_supported_modes(qdev);
1713
1714	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
1715		cmd->base.port = PORT_FIBRE;
1716	} else {
1717		cmd->base.port = PORT_TP;
1718		cmd->base.phy_address = qdev->PHYAddr;
1719	}
1720	advertising = ql_supported_modes(qdev);
1721	cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
1722	cmd->base.speed = ql_get_speed(qdev);
1723	cmd->base.duplex = ql_get_full_dup(qdev);
1724
1725	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1726						supported);
1727	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1728						advertising);
1729
1730	return 0;
1731}
1732
1733static void ql_get_drvinfo(struct net_device *ndev,
1734			   struct ethtool_drvinfo *drvinfo)
1735{
1736	struct ql3_adapter *qdev = netdev_priv(ndev);
1737	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
1738	strlcpy(drvinfo->version, ql3xxx_driver_version,
1739		sizeof(drvinfo->version));
1740	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
1741		sizeof(drvinfo->bus_info));
1742}
1743
1744static u32 ql_get_msglevel(struct net_device *ndev)
1745{
1746	struct ql3_adapter *qdev = netdev_priv(ndev);
1747	return qdev->msg_enable;
1748}
1749
1750static void ql_set_msglevel(struct net_device *ndev, u32 value)
1751{
1752	struct ql3_adapter *qdev = netdev_priv(ndev);
1753	qdev->msg_enable = value;
1754}
1755
1756static void ql_get_pauseparam(struct net_device *ndev,
1757			      struct ethtool_pauseparam *pause)
1758{
1759	struct ql3_adapter *qdev = netdev_priv(ndev);
1760	struct ql3xxx_port_registers __iomem *port_regs =
1761		qdev->mem_map_registers;
1762
1763	u32 reg;
1764	if (qdev->mac_index == 0)
1765		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1766	else
1767		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1768
1769	pause->autoneg  = ql_get_auto_cfg_status(qdev);
1770	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1771	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1772}
1773
1774static const struct ethtool_ops ql3xxx_ethtool_ops = {
1775	.get_drvinfo = ql_get_drvinfo,
1776	.get_link = ethtool_op_get_link,
1777	.get_msglevel = ql_get_msglevel,
1778	.set_msglevel = ql_set_msglevel,
1779	.get_pauseparam = ql_get_pauseparam,
1780	.get_link_ksettings = ql_get_link_ksettings,
1781};
1782
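/*
 * Walk the large-buffer free list and retry the skb allocations that
 * failed earlier.  Returns 1 once every deferred buffer has an skb and
 * a DMA mapping, 0 if any allocation is still outstanding.
 */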
1783static int ql_populate_free_queue(struct ql3_adapter *qdev)
1784{
1785	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1786	dma_addr_t map;
1787	int err;
1788
1789	while (lrg_buf_cb) {
1790		if (!lrg_buf_cb->skb) {
1791			lrg_buf_cb->skb =
1792				netdev_alloc_skb(qdev->ndev,
1793						 qdev->lrg_buffer_len);
1794			if (unlikely(!lrg_buf_cb->skb)) {
1795				netdev_printk(KERN_DEBUG, qdev->ndev,
1796					      "Failed netdev_alloc_skb()\n");
1797				break;
1798			} else {
1799				/*
 1800				 * We save some space to copy the ethhdr from
 1801				 * the first buffer
1802				 */
1803				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1804				map = dma_map_single(&qdev->pdev->dev,
1805						     lrg_buf_cb->skb->data,
1806						     qdev->lrg_buffer_len - QL_HEADER_SPACE,
1807						     DMA_FROM_DEVICE);
1808
1809				err = dma_mapping_error(&qdev->pdev->dev, map);
1810				if (err) {
1811					netdev_err(qdev->ndev,
1812						   "PCI mapping failed with error: %d\n",
1813						   err);
1814					dev_kfree_skb(lrg_buf_cb->skb);
1815					lrg_buf_cb->skb = NULL;
1816					break;
1817				}
1818
1819
1820				lrg_buf_cb->buf_phy_addr_low =
1821					cpu_to_le32(LS_64BITS(map));
1822				lrg_buf_cb->buf_phy_addr_high =
1823					cpu_to_le32(MS_64BITS(map));
1824				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1825				dma_unmap_len_set(lrg_buf_cb, maplen,
1826						  qdev->lrg_buffer_len -
1827						  QL_HEADER_SPACE);
1828				--qdev->lrg_buf_skb_check;
1829				if (!qdev->lrg_buf_skb_check)
1830					return 1;
1831			}
1832		}
1833		lrg_buf_cb = lrg_buf_cb->next;
1834	}
1835	return 0;
1836}
1837
1838/*
1839 * Caller holds hw_lock.
1840 */
1841static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1842{
1843	struct ql3xxx_port_registers __iomem *port_regs =
1844		qdev->mem_map_registers;
1845
1846	if (qdev->small_buf_release_cnt >= 16) {
1847		while (qdev->small_buf_release_cnt >= 16) {
1848			qdev->small_buf_q_producer_index++;
1849
1850			if (qdev->small_buf_q_producer_index ==
1851			    NUM_SBUFQ_ENTRIES)
1852				qdev->small_buf_q_producer_index = 0;
1853			qdev->small_buf_release_cnt -= 8;
1854		}
1855		wmb();
1856		writel_relaxed(qdev->small_buf_q_producer_index,
1857			       &port_regs->CommonRegs.rxSmallQProducerIndex);
1858	}
1859}
1860
1861/*
1862 * Caller holds hw_lock.
1863 */
1864static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1865{
1866	struct bufq_addr_element *lrg_buf_q_ele;
1867	int i;
1868	struct ql_rcv_buf_cb *lrg_buf_cb;
1869	struct ql3xxx_port_registers __iomem *port_regs =
1870		qdev->mem_map_registers;
1871
1872	if ((qdev->lrg_buf_free_count >= 8) &&
1873	    (qdev->lrg_buf_release_cnt >= 16)) {
1874
1875		if (qdev->lrg_buf_skb_check)
1876			if (!ql_populate_free_queue(qdev))
1877				return;
1878
1879		lrg_buf_q_ele = qdev->lrg_buf_next_free;
1880
1881		while ((qdev->lrg_buf_release_cnt >= 16) &&
1882		       (qdev->lrg_buf_free_count >= 8)) {
1883
1884			for (i = 0; i < 8; i++) {
1885				lrg_buf_cb =
1886				    ql_get_from_lrg_buf_free_list(qdev);
1887				lrg_buf_q_ele->addr_high =
1888				    lrg_buf_cb->buf_phy_addr_high;
1889				lrg_buf_q_ele->addr_low =
1890				    lrg_buf_cb->buf_phy_addr_low;
1891				lrg_buf_q_ele++;
1892
1893				qdev->lrg_buf_release_cnt--;
1894			}
1895
1896			qdev->lrg_buf_q_producer_index++;
1897
1898			if (qdev->lrg_buf_q_producer_index ==
1899			    qdev->num_lbufq_entries)
1900				qdev->lrg_buf_q_producer_index = 0;
1901
1902			if (qdev->lrg_buf_q_producer_index ==
1903			    (qdev->num_lbufq_entries - 1)) {
1904				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1905			}
1906		}
1907		wmb();
1908		qdev->lrg_buf_next_free = lrg_buf_q_ele;
1909		writel(qdev->lrg_buf_q_producer_index,
1910			&port_regs->CommonRegs.rxLargeQProducerIndex);
1911	}
1912}
1913
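/*
 * Transmit completion: unmap the DMA segments of the finished frame,
 * update the statistics, free the skb and return the slot to tx_count.
 */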
1914static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1915				   struct ob_mac_iocb_rsp *mac_rsp)
1916{
1917	struct ql_tx_buf_cb *tx_cb;
1918	int i;
1919
1920	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1921		netdev_warn(qdev->ndev,
1922			    "Frame too short but it was padded and sent\n");
1923	}
1924
1925	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1926
1927	/*  Check the transmit response flags for any errors */
1928	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1929		netdev_err(qdev->ndev,
1930			   "Frame too short to be legal, frame not sent\n");
1931
1932		qdev->ndev->stats.tx_errors++;
1933		goto frame_not_sent;
1934	}
1935
1936	if (tx_cb->seg_count == 0) {
1937		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1938			   mac_rsp->transaction_id);
1939
1940		qdev->ndev->stats.tx_errors++;
1941		goto invalid_seg_count;
1942	}
1943
1944	dma_unmap_single(&qdev->pdev->dev,
1945			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
1946			 dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
1947	tx_cb->seg_count--;
1948	if (tx_cb->seg_count) {
1949		for (i = 1; i < tx_cb->seg_count; i++) {
1950			dma_unmap_page(&qdev->pdev->dev,
1951				       dma_unmap_addr(&tx_cb->map[i], mapaddr),
1952				       dma_unmap_len(&tx_cb->map[i], maplen),
1953				       DMA_TO_DEVICE);
1954		}
1955	}
1956	qdev->ndev->stats.tx_packets++;
1957	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1958
1959frame_not_sent:
1960	dev_kfree_skb_irq(tx_cb->skb);
1961	tx_cb->skb = NULL;
1962
1963invalid_seg_count:
1964	atomic_inc(&qdev->tx_count);
1965}
1966
1967static void ql_get_sbuf(struct ql3_adapter *qdev)
1968{
1969	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1970		qdev->small_buf_index = 0;
1971	qdev->small_buf_release_cnt++;
1972}
1973
1974static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1975{
1976	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1977	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1978	qdev->lrg_buf_release_cnt++;
1979	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1980		qdev->lrg_buf_index = 0;
1981	return lrg_buf_cb;
1982}
1983
1984/*
1985 * The difference between 3022 and 3032 for inbound completions:
1986 * 3022 uses two buffers per completion.  The first buffer contains
1987 * (some) header info, the second the remainder of the headers plus
1988 * the data.  For this chip we reserve some space at the top of the
1989 * receive buffer so that the header info in buffer one can be
1990 * prepended to buffer two.  Buffer two is then sent up while
1991 * buffer one is returned to the hardware to be reused.
1992 * 3032 receives all of its data and headers in one buffer for a
1993 * simpler process.  3032 also supports checksum verification as
1994 * can be seen in ql_process_macip_rx_intr().
1995 */
1996static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1997				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1998{
1999	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2000	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2001	struct sk_buff *skb;
2002	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2003
2004	/*
2005	 * Get the inbound address list (small buffer).
2006	 */
2007	ql_get_sbuf(qdev);
2008
2009	if (qdev->device_id == QL3022_DEVICE_ID)
2010		lrg_buf_cb1 = ql_get_lbuf(qdev);
2011
2012	/* start of second buffer */
2013	lrg_buf_cb2 = ql_get_lbuf(qdev);
2014	skb = lrg_buf_cb2->skb;
2015
2016	qdev->ndev->stats.rx_packets++;
2017	qdev->ndev->stats.rx_bytes += length;
2018
2019	skb_put(skb, length);
2020	dma_unmap_single(&qdev->pdev->dev,
2021			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2022			 dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
2023	prefetch(skb->data);
2024	skb_checksum_none_assert(skb);
2025	skb->protocol = eth_type_trans(skb, qdev->ndev);
2026
2027	napi_gro_receive(&qdev->napi, skb);
2028	lrg_buf_cb2->skb = NULL;
2029
2030	if (qdev->device_id == QL3022_DEVICE_ID)
2031		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2032	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2033}
2034
2035static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2036				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2037{
2038	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2039	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2040	struct sk_buff *skb1 = NULL, *skb2;
2041	struct net_device *ndev = qdev->ndev;
2042	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2043	u16 size = 0;
2044
2045	/*
2046	 * Get the inbound address list (small buffer).
2047	 */
2048
2049	ql_get_sbuf(qdev);
2050
2051	if (qdev->device_id == QL3022_DEVICE_ID) {
2052		/* start of first buffer on 3022 */
2053		lrg_buf_cb1 = ql_get_lbuf(qdev);
2054		skb1 = lrg_buf_cb1->skb;
2055		size = ETH_HLEN;
2056		if (*((u16 *) skb1->data) != 0xFFFF)
2057			size += VLAN_ETH_HLEN - ETH_HLEN;
2058	}
2059
2060	/* start of second buffer */
2061	lrg_buf_cb2 = ql_get_lbuf(qdev);
2062	skb2 = lrg_buf_cb2->skb;
2063
2064	skb_put(skb2, length);	/* Just the second buffer length here. */
2065	dma_unmap_single(&qdev->pdev->dev,
2066			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2067			 dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
2068	prefetch(skb2->data);
2069
2070	skb_checksum_none_assert(skb2);
2071	if (qdev->device_id == QL3022_DEVICE_ID) {
2072		/*
2073		 * Copy the ethhdr from first buffer to second. This
2074		 * is necessary for 3022 IP completions.
2075		 */
2076		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2077						 skb_push(skb2, size), size);
2078	} else {
2079		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2080		if (checksum &
2081			(IB_IP_IOCB_RSP_3032_ICE |
2082			 IB_IP_IOCB_RSP_3032_CE)) {
2083			netdev_err(ndev,
2084				   "%s: Bad checksum for this %s packet, checksum = %x\n",
2085				   __func__,
2086				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2087				    "TCP" : "UDP"), checksum);
2088		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2089				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
2090				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2091			skb2->ip_summed = CHECKSUM_UNNECESSARY;
2092		}
2093	}
2094	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2095
2096	napi_gro_receive(&qdev->napi, skb2);
2097	ndev->stats.rx_packets++;
2098	ndev->stats.rx_bytes += length;
2099	lrg_buf_cb2->skb = NULL;
2100
2101	if (qdev->device_id == QL3022_DEVICE_ID)
2102		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2103	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2104}
2105
2106static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
2107{
2108	struct net_rsp_iocb *net_rsp;
2109	struct net_device *ndev = qdev->ndev;
2110	int work_done = 0;
2111
2112	/* While there are entries in the completion queue. */
2113	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2114		qdev->rsp_consumer_index) && (work_done < budget)) {
2115
2116		net_rsp = qdev->rsp_current;
2117		rmb();
2118		/*
2119		 * Fix the 3032 chip's undocumented "feature" where bit-8 is set
2120		 * if the inbound completion is for a VLAN.
2121		 */
2122		if (qdev->device_id == QL3032_DEVICE_ID)
2123			net_rsp->opcode &= 0x7f;
2124		switch (net_rsp->opcode) {
2125
2126		case OPCODE_OB_MAC_IOCB_FN0:
2127		case OPCODE_OB_MAC_IOCB_FN2:
2128			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2129					       net_rsp);
2130			break;
2131
2132		case OPCODE_IB_MAC_IOCB:
2133		case OPCODE_IB_3032_MAC_IOCB:
2134			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2135					       net_rsp);
2136			work_done++;
2137			break;
2138
2139		case OPCODE_IB_IP_IOCB:
2140		case OPCODE_IB_3032_IP_IOCB:
2141			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2142						 net_rsp);
2143			work_done++;
2144			break;
2145		default: {
2146			u32 *tmp = (u32 *)net_rsp;
2147			netdev_err(ndev,
2148				   "Hit default case, not handled!\n"
2149				   "	dropping the packet, opcode = %x\n"
2150				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2151				   net_rsp->opcode,
2152				   (unsigned long int)tmp[0],
2153				   (unsigned long int)tmp[1],
2154				   (unsigned long int)tmp[2],
2155				   (unsigned long int)tmp[3]);
2156		}
2157		}
2158
2159		qdev->rsp_consumer_index++;
2160
2161		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2162			qdev->rsp_consumer_index = 0;
2163			qdev->rsp_current = qdev->rsp_q_virt_addr;
2164		} else {
2165			qdev->rsp_current++;
2166		}
2167
2168	}
2169
2170	return work_done;
2171}
2172
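/*
 * NAPI poll routine (summary of the code below): drain up to @budget
 * completions from the response queue.  When the queue empties before the
 * budget is exhausted, complete NAPI, push the updated buffer queue
 * producer indices and the response consumer index to the chip under
 * hw_lock, and re-enable interrupts.
 */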
2173static int ql_poll(struct napi_struct *napi, int budget)
2174{
2175	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2176	struct ql3xxx_port_registers __iomem *port_regs =
2177		qdev->mem_map_registers;
2178	int work_done;
2179
2180	work_done = ql_tx_rx_clean(qdev, budget);
2181
2182	if (work_done < budget && napi_complete_done(napi, work_done)) {
2183		unsigned long flags;
2184
2185		spin_lock_irqsave(&qdev->hw_lock, flags);
2186		ql_update_small_bufq_prod_index(qdev);
2187		ql_update_lrg_bufq_prod_index(qdev);
2188		writel(qdev->rsp_consumer_index,
2189			    &port_regs->CommonRegs.rspQConsumerIndex);
2190		spin_unlock_irqrestore(&qdev->hw_lock, flags);
2191
2192		ql_enable_interrupts(qdev);
2193	}
2194	return work_done;
2195}
2196
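/*
 * Interrupt handler (summary of the code below): a chip fatal error or a
 * reset issued by the other function takes the link down and schedules
 * reset_work; a normal completion interrupt disables further interrupts
 * and hands the work to NAPI; anything else is not ours, since the line
 * may be shared.
 */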
2197static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2198{
2199
2201	struct ql3_adapter *qdev = netdev_priv(ndev);
2202	struct ql3xxx_port_registers __iomem *port_regs =
2203		qdev->mem_map_registers;
2204	u32 value;
2205	int handled = 1;
2206	u32 var;
2207
2208	value = ql_read_common_reg_l(qdev,
2209				     &port_regs->CommonRegs.ispControlStatus);
2210
2211	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2212		spin_lock(&qdev->adapter_lock);
2213		netif_stop_queue(qdev->ndev);
2214		netif_carrier_off(qdev->ndev);
2215		ql_disable_interrupts(qdev);
2216		qdev->port_link_state = LS_DOWN;
2217		set_bit(QL_RESET_ACTIVE, &qdev->flags);
2218
2219		if (value & ISP_CONTROL_FE) {
2220			/*
2221			 * Chip Fatal Error.
2222			 */
2223			var =
2224			    ql_read_page0_reg_l(qdev,
2225					      &port_regs->PortFatalErrStatus);
2226			netdev_warn(ndev,
2227				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2228				    var);
2229			set_bit(QL_RESET_START, &qdev->flags);
2230		} else {
2231			/*
2232			 * Soft Reset Requested.
2233			 */
2234			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2235			netdev_err(ndev,
2236				   "Another function issued a reset to the chip. ISR value = %x\n",
2237				   value);
2238		}
2239		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2240		spin_unlock(&qdev->adapter_lock);
2241	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2242		ql_disable_interrupts(qdev);
2243		if (likely(napi_schedule_prep(&qdev->napi)))
2244			__napi_schedule(&qdev->napi);
2245	} else
2246		return IRQ_NONE;
2247
2248	return IRQ_RETVAL(handled);
2249}
2250
2251/*
2252 * Get the total number of segments needed for the given number of fragments.
2253 * This is necessary because outbound address lists (OAL) will be used when
2254 * more than two frags are given.  Each address list has 5 addr/len pairs.
2255 * The 5th pair in each OAL is used to point to the next OAL if more frags
2256 * are coming.  That is why the frags:segment count ratio is not linear.
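 *
 * A worked example (3032 only, illustrative): an skb with 5 fragments
 * needs 1 segment for the linear data, 5 for the fragments, and 1
 * continuation ALP pointing at the OAL, i.e. frags + 2 = 7 segments.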
2257 */
2258static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2259{
2260	if (qdev->device_id == QL3022_DEVICE_ID)
2261		return 1;
2262
2263	if (frags <= 2)
2264		return frags + 1;
2265	else if (frags <= 6)
2266		return frags + 2;
2267	else if (frags <= 10)
2268		return frags + 3;
2269	else if (frags <= 14)
2270		return frags + 4;
2271	else if (frags <= 18)
2272		return frags + 5;
2273	return -1;
2274}
2275
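/*
 * Summary of the helper below: set up hardware checksum offload for the
 * 3032 by recording the IP header offset and length in the IOCB and
 * requesting TCP or UDP checksum generation (OB_3032MAC_IOCB_REQ_TC/UC
 * plus IC).  It is only called for CHECKSUM_PARTIAL skbs, see
 * ql3xxx_send().
 */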
2276static void ql_hw_csum_setup(const struct sk_buff *skb,
2277			     struct ob_mac_iocb_req *mac_iocb_ptr)
2278{
2279	const struct iphdr *ip = ip_hdr(skb);
2280
2281	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2282	mac_iocb_ptr->ip_hdr_len = ip->ihl;
2283
2284	if (ip->protocol == IPPROTO_TCP) {
2285		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2286			OB_3032MAC_IOCB_REQ_IC;
2287	} else {
2288		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2289			OB_3032MAC_IOCB_REQ_IC;
2290	}
2291
2292}
2293
2294/*
2295 * Map the buffers for this transmit.
2296 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2297 */
2298static int ql_send_map(struct ql3_adapter *qdev,
2299				struct ob_mac_iocb_req *mac_iocb_ptr,
2300				struct ql_tx_buf_cb *tx_cb,
2301				struct sk_buff *skb)
2302{
2303	struct oal *oal;
2304	struct oal_entry *oal_entry;
2305	int len = skb_headlen(skb);
2306	dma_addr_t map;
2307	int err;
2308	int completed_segs, i;
2309	int seg_cnt, seg = 0;
2310	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2311
2312	seg_cnt = tx_cb->seg_count;
2313	/*
2314	 * Map the skb buffer first.
2315	 */
2316	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
2317
2318	err = dma_mapping_error(&qdev->pdev->dev, map);
2319	if (err) {
2320		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2321			   err);
2322
2323		return NETDEV_TX_BUSY;
2324	}
2325
2326	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2327	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2328	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2329	oal_entry->len = cpu_to_le32(len);
2330	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2331	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2332	seg++;
2333
2334	if (seg_cnt == 1) {
2335		/* Terminate the last segment. */
2336		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2337		return NETDEV_TX_OK;
2338	}
2339	oal = tx_cb->oal;
2340	for (completed_segs = 0;
2341	     completed_segs < frag_cnt;
2342	     completed_segs++, seg++) {
2343		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2344		oal_entry++;
2345		/*
2346		 * Continuation check: the IOCB holds only three ALPs, so its
2347		 * third ALP must point to an OAL when more segments follow;
2348		 * the fifth ALP of each OAL likewise points to the next OAL.
2349		 */
2350		if ((seg == 2 && seg_cnt > 3) ||
2351		    (seg == 7 && seg_cnt > 8) ||
2352		    (seg == 12 && seg_cnt > 13) ||
2353		    (seg == 17 && seg_cnt > 18)) {
2354			map = dma_map_single(&qdev->pdev->dev, oal,
2355					     sizeof(struct oal),
2356					     DMA_TO_DEVICE);
2357
2358			err = dma_mapping_error(&qdev->pdev->dev, map);
2359			if (err) {
2360				netdev_err(qdev->ndev,
2361					   "PCI mapping outbound address list with error: %d\n",
2362					   err);
2363				goto map_error;
2364			}
2365
2366			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2367			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2368			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2369						     OAL_CONT_ENTRY);
2370			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2371			dma_unmap_len_set(&tx_cb->map[seg], maplen,
2372					  sizeof(struct oal));
2373			oal_entry = (struct oal_entry *)oal;
2374			oal++;
2375			seg++;
2376		}
2377
2378		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2379				       DMA_TO_DEVICE);
2380
2381		err = dma_mapping_error(&qdev->pdev->dev, map);
2382		if (err) {
2383			netdev_err(qdev->ndev,
2384				   "PCI mapping frags failed with error: %d\n",
2385				   err);
2386			goto map_error;
2387		}
2388
2389		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2390		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2391		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2392		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2393		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2394	}
2395	/* Terminate the last segment. */
2396	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2397	return NETDEV_TX_OK;
2398
2399map_error:
2400	/* A PCI mapping failed, so we must back out.  Walk the OALs and
2401	 * pages that have already been mapped and unmap them to clean up
2402	 * properly.
2403	 */
2404
2405	seg = 1;
2406	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2407	oal = tx_cb->oal;
2408	for (i = 0; i < completed_segs; i++, seg++) {
2409		oal_entry++;
2410
2411		/*
2412		 * Undo the OAL continuation mapping made at this position
2413		 * (this mirrors the check in the mapping loop above).
2414		 */
2415
2416		if ((seg == 2 && seg_cnt > 3) ||
2417		    (seg == 7 && seg_cnt > 8) ||
2418		    (seg == 12 && seg_cnt > 13) ||
2419		    (seg == 17 && seg_cnt > 18)) {
2420			dma_unmap_single(&qdev->pdev->dev,
2421					 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2422					 dma_unmap_len(&tx_cb->map[seg], maplen),
2423					 DMA_TO_DEVICE);
2424			oal++;
2425			seg++;
2426		}
2427
2428		dma_unmap_page(&qdev->pdev->dev,
2429			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2430			       dma_unmap_len(&tx_cb->map[seg], maplen),
2431			       DMA_TO_DEVICE);
2432	}
2433
2434	dma_unmap_single(&qdev->pdev->dev,
2435			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2436			 dma_unmap_len(&tx_cb->map[0], maplen),
2437			 DMA_TO_DEVICE);
2438
2439	return NETDEV_TX_BUSY;
2440
2441}
2442
2443/*
2444 * The difference between 3022 and 3032 sends:
2445 * 3022 only supports a simple single segment transmission.
2446 * 3032 supports checksumming and scatter/gather lists (fragments).
2447 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2448 * in the IOCB plus a chain of outbound address lists (OAL) that
2449 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2450 * will be used to point to an OAL when more ALP entries are required.
2451 * The IOCB is always the top of the chain followed by one or more
2452 * OALs (when necessary).
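 *
 * The flow below: claim the tx_cb slot at the current request producer
 * index, build the MAC IOCB (adding checksum offload on the 3032 when the
 * skb asks for it), map the buffers via ql_send_map(), then advance the
 * producer index and post it to the chip to start the send.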
2453 */
2454static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2455			       struct net_device *ndev)
2456{
2457	struct ql3_adapter *qdev = netdev_priv(ndev);
2458	struct ql3xxx_port_registers __iomem *port_regs =
2459			qdev->mem_map_registers;
2460	struct ql_tx_buf_cb *tx_cb;
2461	u32 tot_len = skb->len;
2462	struct ob_mac_iocb_req *mac_iocb_ptr;
2463
2464	if (unlikely(atomic_read(&qdev->tx_count) < 2))
2465		return NETDEV_TX_BUSY;
2466
2467	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2468	tx_cb->seg_count = ql_get_seg_count(qdev,
2469					     skb_shinfo(skb)->nr_frags);
2470	if (tx_cb->seg_count == -1) {
2471		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2472		return NETDEV_TX_OK;
2473	}
2474
2475	mac_iocb_ptr = tx_cb->queue_entry;
2476	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2477	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2478	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2479	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2480	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2481	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2482	tx_cb->skb = skb;
2483	if (qdev->device_id == QL3032_DEVICE_ID &&
2484	    skb->ip_summed == CHECKSUM_PARTIAL)
2485		ql_hw_csum_setup(skb, mac_iocb_ptr);
2486
2487	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2488		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2489		return NETDEV_TX_BUSY;
2490	}
2491
2492	wmb();
2493	qdev->req_producer_index++;
2494	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2495		qdev->req_producer_index = 0;
2496	wmb();
2497	ql_write_common_reg_l(qdev,
2498			    &port_regs->CommonRegs.reqQProducerIndex,
2499			    qdev->req_producer_index);
2500
2501	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2502		     "tx queued, slot %d, len %d\n",
2503		     qdev->req_producer_index, skb->len);
2504
2505	atomic_dec(&qdev->tx_count);
2506	return NETDEV_TX_OK;
2507}
2508
2509static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2510{
2511	qdev->req_q_size =
2512	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2513
2514	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2515
2516	/* The barrier is required to ensure that the request and response
2517	 * queue address writes have reached the registers.
2518	 */
2519	wmb();
2520
2521	qdev->req_q_virt_addr =
2522	    dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2523			       &qdev->req_q_phy_addr, GFP_KERNEL);
2524
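	/*
	 * The hardware appears to require each queue to be naturally aligned
	 * to its size; note that the mask test below only makes sense when
	 * the queue size is a power of two.
	 */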
2525	if ((qdev->req_q_virt_addr == NULL) ||
2526	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2527		netdev_err(qdev->ndev, "reqQ failed\n");
2528		return -ENOMEM;
2529	}
2530
2531	qdev->rsp_q_virt_addr =
2532	    dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
2533			       &qdev->rsp_q_phy_addr, GFP_KERNEL);
2534
2535	if ((qdev->rsp_q_virt_addr == NULL) ||
2536	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2537		netdev_err(qdev->ndev, "rspQ allocation failed\n");
2538		dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
2539				  qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2540		return -ENOMEM;
2541	}
2542
2543	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2544
2545	return 0;
2546}
2547
2548static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2549{
2550	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2551		netdev_info(qdev->ndev, "Already done\n");
2552		return;
2553	}
2554
2555	dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
2556			  qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2557
2558	qdev->req_q_virt_addr = NULL;
2559
2560	dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
2561			  qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2562
2563	qdev->rsp_q_virt_addr = NULL;
2564
2565	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2566}
2567
2568static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2569{
2570	/* Create Large Buffer Queue */
2571	qdev->lrg_buf_q_size =
2572		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2573	if (qdev->lrg_buf_q_size < PAGE_SIZE)
2574		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2575	else
2576		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2577
2578	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2579				      sizeof(struct ql_rcv_buf_cb),
2580				      GFP_KERNEL);
2581	if (qdev->lrg_buf == NULL)
2582		return -ENOMEM;
2583
2584	qdev->lrg_buf_q_alloc_virt_addr =
2585		dma_alloc_coherent(&qdev->pdev->dev,
2586				   qdev->lrg_buf_q_alloc_size,
2587				   &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
2588
2589	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2590		netdev_err(qdev->ndev, "lBufQ failed\n");
2591		return -ENOMEM;
2592	}
2593	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2594	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2595
2596	/* Create Small Buffer Queue */
2597	qdev->small_buf_q_size =
2598		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2599	if (qdev->small_buf_q_size < PAGE_SIZE)
2600		qdev->small_buf_q_alloc_size = PAGE_SIZE;
2601	else
2602		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2603
2604	qdev->small_buf_q_alloc_virt_addr =
2605		dma_alloc_coherent(&qdev->pdev->dev,
2606				   qdev->small_buf_q_alloc_size,
2607				   &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
2608
2609	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2610		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2611		dma_free_coherent(&qdev->pdev->dev,
2612				  qdev->lrg_buf_q_alloc_size,
2613				  qdev->lrg_buf_q_alloc_virt_addr,
2614				  qdev->lrg_buf_q_alloc_phy_addr);
2615		return -ENOMEM;
2616	}
2617
2618	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2619	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2620	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2621	return 0;
2622}
2623
2624static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2625{
2626	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2627		netdev_info(qdev->ndev, "Already done\n");
2628		return;
2629	}
2630	kfree(qdev->lrg_buf);
2631	dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
2632			  qdev->lrg_buf_q_alloc_virt_addr,
2633			  qdev->lrg_buf_q_alloc_phy_addr);
2634
2635	qdev->lrg_buf_q_virt_addr = NULL;
2636
2637	dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
2638			  qdev->small_buf_q_alloc_virt_addr,
2639			  qdev->small_buf_q_alloc_phy_addr);
2640
2641	qdev->small_buf_q_virt_addr = NULL;
2642
2643	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2644}
2645
2646static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2647{
2648	int i;
2649	struct bufq_addr_element *small_buf_q_entry;
2650
2651	/* Currently we allocate one chunk of memory and carve it up into the small buffers */
2652	qdev->small_buf_total_size =
2653		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2654		 QL_SMALL_BUFFER_SIZE);
2655
2656	qdev->small_buf_virt_addr =
2657		dma_alloc_coherent(&qdev->pdev->dev,
2658				   qdev->small_buf_total_size,
2659				   &qdev->small_buf_phy_addr, GFP_KERNEL);
2660
2661	if (qdev->small_buf_virt_addr == NULL) {
2662		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2663		return -ENOMEM;
2664	}
2665
2666	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2667	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2668
2669	small_buf_q_entry = qdev->small_buf_q_virt_addr;
2670
2671	/* Initialize the small buffer queue. */
2672	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2673		small_buf_q_entry->addr_high =
2674		    cpu_to_le32(qdev->small_buf_phy_addr_high);
2675		small_buf_q_entry->addr_low =
2676		    cpu_to_le32(qdev->small_buf_phy_addr_low +
2677				(i * QL_SMALL_BUFFER_SIZE));
2678		small_buf_q_entry++;
2679	}
2680	qdev->small_buf_index = 0;
2681	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2682	return 0;
2683}
2684
2685static void ql_free_small_buffers(struct ql3_adapter *qdev)
2686{
2687	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2688		netdev_info(qdev->ndev, "Already done\n");
2689		return;
2690	}
2691	if (qdev->small_buf_virt_addr != NULL) {
2692		dma_free_coherent(&qdev->pdev->dev,
2693				  qdev->small_buf_total_size,
2694				  qdev->small_buf_virt_addr,
2695				  qdev->small_buf_phy_addr);
2696
2697		qdev->small_buf_virt_addr = NULL;
2698	}
2699}
2700
2701static void ql_free_large_buffers(struct ql3_adapter *qdev)
2702{
2703	int i = 0;
2704	struct ql_rcv_buf_cb *lrg_buf_cb;
2705
2706	for (i = 0; i < qdev->num_large_buffers; i++) {
2707		lrg_buf_cb = &qdev->lrg_buf[i];
2708		if (lrg_buf_cb->skb) {
2709			dev_kfree_skb(lrg_buf_cb->skb);
2710			dma_unmap_single(&qdev->pdev->dev,
2711					 dma_unmap_addr(lrg_buf_cb, mapaddr),
2712					 dma_unmap_len(lrg_buf_cb, maplen),
2713					 DMA_FROM_DEVICE);
2714			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2715		} else {
2716			break;
2717		}
2718	}
2719}
2720
2721static void ql_init_large_buffers(struct ql3_adapter *qdev)
2722{
2723	int i;
2724	struct ql_rcv_buf_cb *lrg_buf_cb;
2725	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2726
2727	for (i = 0; i < qdev->num_large_buffers; i++) {
2728		lrg_buf_cb = &qdev->lrg_buf[i];
2729		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2730		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2731		buf_addr_ele++;
2732	}
2733	qdev->lrg_buf_index = 0;
2734	qdev->lrg_buf_skb_check = 0;
2735}
2736
2737static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2738{
2739	int i;
2740	struct ql_rcv_buf_cb *lrg_buf_cb;
2741	struct sk_buff *skb;
2742	dma_addr_t map;
2743	int err;
2744
2745	for (i = 0; i < qdev->num_large_buffers; i++) {
2746		lrg_buf_cb = &qdev->lrg_buf[i];
2747		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2748
2749		skb = netdev_alloc_skb(qdev->ndev,
2750				       qdev->lrg_buffer_len);
2751		if (unlikely(!skb)) {
2752			/* Better luck next round */
2753			netdev_err(qdev->ndev,
2754				   "large buff alloc failed for %d bytes at index %d\n",
2755				   qdev->lrg_buffer_len, i);
2756			ql_free_large_buffers(qdev);
2757			return -ENOMEM;
2758		} else {
2759			lrg_buf_cb->index = i;
2760			/*
2761			 * We save some space to copy the ethhdr from first
2762			 * buffer
2763			 */
2764			skb_reserve(skb, QL_HEADER_SPACE);
2765			map = dma_map_single(&qdev->pdev->dev, skb->data,
2766					     qdev->lrg_buffer_len - QL_HEADER_SPACE,
2767					     DMA_FROM_DEVICE);
2768
2769			err = dma_mapping_error(&qdev->pdev->dev, map);
2770			if (err) {
2771				netdev_err(qdev->ndev,
2772					   "PCI mapping failed with error: %d\n",
2773					   err);
2774				dev_kfree_skb_irq(skb);
2775				ql_free_large_buffers(qdev);
2776				return -ENOMEM;
2777			}
2778
2779			lrg_buf_cb->skb = skb;
2780			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2781			dma_unmap_len_set(lrg_buf_cb, maplen,
2782					  qdev->lrg_buffer_len -
2783					  QL_HEADER_SPACE);
2784			lrg_buf_cb->buf_phy_addr_low =
2785			    cpu_to_le32(LS_64BITS(map));
2786			lrg_buf_cb->buf_phy_addr_high =
2787			    cpu_to_le32(MS_64BITS(map));
2788		}
2789	}
2790	return 0;
2791}
2792
2793static void ql_free_send_free_list(struct ql3_adapter *qdev)
2794{
2795	struct ql_tx_buf_cb *tx_cb;
2796	int i;
2797
2798	tx_cb = &qdev->tx_buf[0];
2799	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2800		kfree(tx_cb->oal);
2801		tx_cb->oal = NULL;
2802		tx_cb++;
2803	}
2804}
2805
2806static int ql_create_send_free_list(struct ql3_adapter *qdev)
2807{
2808	struct ql_tx_buf_cb *tx_cb;
2809	int i;
2810	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2811
2812	/* Create free list of transmit buffers */
2813	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2814
2815		tx_cb = &qdev->tx_buf[i];
2816		tx_cb->skb = NULL;
2817		tx_cb->queue_entry = req_q_curr;
2818		req_q_curr++;
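		/*
		 * 512 bytes is presumably enough room for the worst-case
		 * chain of OALs for a fully fragmented skb; the size is not
		 * derived from a named constant here.
		 */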
2819		tx_cb->oal = kmalloc(512, GFP_KERNEL);
2820		if (tx_cb->oal == NULL)
2821			return -ENOMEM;
2822	}
2823	return 0;
2824}
2825
2826static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2827{
2828	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2829		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2830		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2831	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2832		/*
2833		 * Bigger buffers, so fewer of them.
2834		 */
2835		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2836		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2837	} else {
2838		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
2839			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2840		return -ENOMEM;
2841	}
2842	qdev->num_large_buffers =
2843		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2844	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2845	qdev->max_frame_size =
2846		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2847
2848	/*
2849	 * First allocate a page of shared memory and use it for shadow
2850	 * locations of Network Request Queue Consumer Address Register and
2851	 * Network Completion Queue Producer Index Register
2852	 */
2853	qdev->shadow_reg_virt_addr =
2854		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2855				   &qdev->shadow_reg_phy_addr, GFP_KERNEL);
2856
2857	if (qdev->shadow_reg_virt_addr != NULL) {
2858		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2859		qdev->req_consumer_index_phy_addr_high =
2860			MS_64BITS(qdev->shadow_reg_phy_addr);
2861		qdev->req_consumer_index_phy_addr_low =
2862			LS_64BITS(qdev->shadow_reg_phy_addr);
2863
2864		qdev->prsp_producer_index =
2865			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2866		qdev->rsp_producer_index_phy_addr_high =
2867			qdev->req_consumer_index_phy_addr_high;
2868		qdev->rsp_producer_index_phy_addr_low =
2869			qdev->req_consumer_index_phy_addr_low + 8;
2870	} else {
2871		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2872		return -ENOMEM;
2873	}
2874
2875	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2876		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2877		goto err_req_rsp;
2878	}
2879
2880	if (ql_alloc_buffer_queues(qdev) != 0) {
2881		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2882		goto err_buffer_queues;
2883	}
2884
2885	if (ql_alloc_small_buffers(qdev) != 0) {
2886		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2887		goto err_small_buffers;
2888	}
2889
2890	if (ql_alloc_large_buffers(qdev) != 0) {
2891		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2892		goto err_small_buffers;
2893	}
2894
2895	/* Initialize the large buffer queue. */
2896	ql_init_large_buffers(qdev);
2897	if (ql_create_send_free_list(qdev))
2898		goto err_free_list;
2899
2900	qdev->rsp_current = qdev->rsp_q_virt_addr;
2901
2902	return 0;
2903err_free_list:
2904	ql_free_send_free_list(qdev);
2905err_small_buffers:
2906	ql_free_buffer_queues(qdev);
2907err_buffer_queues:
2908	ql_free_net_req_rsp_queues(qdev);
2909err_req_rsp:
2910	dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2911			  qdev->shadow_reg_virt_addr,
2912			  qdev->shadow_reg_phy_addr);
2913
2914	return -ENOMEM;
2915}
2916
2917static void ql_free_mem_resources(struct ql3_adapter *qdev)
2918{
2919	ql_free_send_free_list(qdev);
2920	ql_free_large_buffers(qdev);
2921	ql_free_small_buffers(qdev);
2922	ql_free_buffer_queues(qdev);
2923	ql_free_net_req_rsp_queues(qdev);
2924	if (qdev->shadow_reg_virt_addr != NULL) {
2925		dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
2926				  qdev->shadow_reg_virt_addr,
2927				  qdev->shadow_reg_phy_addr);
2928		qdev->shadow_reg_virt_addr = NULL;
2929	}
2930}
2931
2932static int ql_init_misc_registers(struct ql3_adapter *qdev)
2933{
2934	struct ql3xxx_local_ram_registers __iomem *local_ram =
2935	    (void __iomem *)qdev->mem_map_registers;
2936
2937	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2938			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2939			 2) << 4))
2940		return -1;
2941
2942	ql_write_page2_reg(qdev,
2943			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2944
2945	ql_write_page2_reg(qdev,
2946			   &local_ram->maxBufletCount,
2947			   qdev->nvram_data.bufletCount);
2948
2949	ql_write_page2_reg(qdev,
2950			   &local_ram->freeBufletThresholdLow,
2951			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2952			   (qdev->nvram_data.tcpWindowThreshold0));
2953
2954	ql_write_page2_reg(qdev,
2955			   &local_ram->freeBufletThresholdHigh,
2956			   qdev->nvram_data.tcpWindowThreshold50);
2957
2958	ql_write_page2_reg(qdev,
2959			   &local_ram->ipHashTableBase,
2960			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
2961			   qdev->nvram_data.ipHashTableBaseLo);
2962	ql_write_page2_reg(qdev,
2963			   &local_ram->ipHashTableCount,
2964			   qdev->nvram_data.ipHashTableSize);
2965	ql_write_page2_reg(qdev,
2966			   &local_ram->tcpHashTableBase,
2967			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2968			   qdev->nvram_data.tcpHashTableBaseLo);
2969	ql_write_page2_reg(qdev,
2970			   &local_ram->tcpHashTableCount,
2971			   qdev->nvram_data.tcpHashTableSize);
2972	ql_write_page2_reg(qdev,
2973			   &local_ram->ncbBase,
2974			   (qdev->nvram_data.ncbTableBaseHi << 16) |
2975			   qdev->nvram_data.ncbTableBaseLo);
2976	ql_write_page2_reg(qdev,
2977			   &local_ram->maxNcbCount,
2978			   qdev->nvram_data.ncbTableSize);
2979	ql_write_page2_reg(qdev,
2980			   &local_ram->drbBase,
2981			   (qdev->nvram_data.drbTableBaseHi << 16) |
2982			   qdev->nvram_data.drbTableBaseLo);
2983	ql_write_page2_reg(qdev,
2984			   &local_ram->maxDrbCount,
2985			   qdev->nvram_data.drbTableSize);
2986	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2987	return 0;
2988}
2989
2990static int ql_adapter_initialize(struct ql3_adapter *qdev)
2991{
2992	u32 value;
2993	struct ql3xxx_port_registers __iomem *port_regs =
2994		qdev->mem_map_registers;
2995	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
2996	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2997		(void __iomem *)port_regs;
2998	u32 delay = 10;
2999	int status = 0;
3000
3001	if (ql_mii_setup(qdev))
3002		return -1;
3003
3004	/* Bring the PHY out of reset */
3005	ql_write_common_reg(qdev, spir,
3006			    (ISP_SERIAL_PORT_IF_WE |
3007			     (ISP_SERIAL_PORT_IF_WE << 16)));
3008	/* Give the PHY time to come out of reset. */
3009	mdelay(100);
3010	qdev->port_link_state = LS_DOWN;
3011	netif_carrier_off(qdev->ndev);
3012
3013	/* V2 chip fix for ARS-39168. */
3014	ql_write_common_reg(qdev, spir,
3015			    (ISP_SERIAL_PORT_IF_SDE |
3016			     (ISP_SERIAL_PORT_IF_SDE << 16)));
3017
3018	/* Request Queue Registers */
3019	*((u32 *)(qdev->preq_consumer_index)) = 0;
3020	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3021	qdev->req_producer_index = 0;
3022
3023	ql_write_page1_reg(qdev,
3024			   &hmem_regs->reqConsumerIndexAddrHigh,
3025			   qdev->req_consumer_index_phy_addr_high);
3026	ql_write_page1_reg(qdev,
3027			   &hmem_regs->reqConsumerIndexAddrLow,
3028			   qdev->req_consumer_index_phy_addr_low);
3029
3030	ql_write_page1_reg(qdev,
3031			   &hmem_regs->reqBaseAddrHigh,
3032			   MS_64BITS(qdev->req_q_phy_addr));
3033	ql_write_page1_reg(qdev,
3034			   &hmem_regs->reqBaseAddrLow,
3035			   LS_64BITS(qdev->req_q_phy_addr));
3036	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3037
3038	/* Response Queue Registers */
3039	*((__le32 *) (qdev->prsp_producer_index)) = 0;
3040	qdev->rsp_consumer_index = 0;
3041	qdev->rsp_current = qdev->rsp_q_virt_addr;
3042
3043	ql_write_page1_reg(qdev,
3044			   &hmem_regs->rspProducerIndexAddrHigh,
3045			   qdev->rsp_producer_index_phy_addr_high);
3046
3047	ql_write_page1_reg(qdev,
3048			   &hmem_regs->rspProducerIndexAddrLow,
3049			   qdev->rsp_producer_index_phy_addr_low);
3050
3051	ql_write_page1_reg(qdev,
3052			   &hmem_regs->rspBaseAddrHigh,
3053			   MS_64BITS(qdev->rsp_q_phy_addr));
3054
3055	ql_write_page1_reg(qdev,
3056			   &hmem_regs->rspBaseAddrLow,
3057			   LS_64BITS(qdev->rsp_q_phy_addr));
3058
3059	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3060
3061	/* Large Buffer Queue */
3062	ql_write_page1_reg(qdev,
3063			   &hmem_regs->rxLargeQBaseAddrHigh,
3064			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
3065
3066	ql_write_page1_reg(qdev,
3067			   &hmem_regs->rxLargeQBaseAddrLow,
3068			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
3069
3070	ql_write_page1_reg(qdev,
3071			   &hmem_regs->rxLargeQLength,
3072			   qdev->num_lbufq_entries);
3073
3074	ql_write_page1_reg(qdev,
3075			   &hmem_regs->rxLargeBufferLength,
3076			   qdev->lrg_buffer_len);
3077
3078	/* Small Buffer Queue */
3079	ql_write_page1_reg(qdev,
3080			   &hmem_regs->rxSmallQBaseAddrHigh,
3081			   MS_64BITS(qdev->small_buf_q_phy_addr));
3082
3083	ql_write_page1_reg(qdev,
3084			   &hmem_regs->rxSmallQBaseAddrLow,
3085			   LS_64BITS(qdev->small_buf_q_phy_addr));
3086
3087	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3088	ql_write_page1_reg(qdev,
3089			   &hmem_regs->rxSmallBufferLength,
3090			   QL_SMALL_BUFFER_SIZE);
3091
3092	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3093	qdev->small_buf_release_cnt = 8;
3094	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3095	qdev->lrg_buf_release_cnt = 8;
3096	qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3097	qdev->small_buf_index = 0;
3098	qdev->lrg_buf_index = 0;
3099	qdev->lrg_buf_free_count = 0;
3100	qdev->lrg_buf_free_head = NULL;
3101	qdev->lrg_buf_free_tail = NULL;
3102
3103	ql_write_common_reg(qdev,
3104			    &port_regs->CommonRegs.
3105			    rxSmallQProducerIndex,
3106			    qdev->small_buf_q_producer_index);
3107	ql_write_common_reg(qdev,
3108			    &port_regs->CommonRegs.
3109			    rxLargeQProducerIndex,
3110			    qdev->lrg_buf_q_producer_index);
3111
3112	/*
3113	 * Find out if the chip has already been initialized.  If it has, then
3114	 * we skip some of the initialization.
3115	 */
3116	clear_bit(QL_LINK_MASTER, &qdev->flags);
3117	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3118	if ((value & PORT_STATUS_IC) == 0) {
3119
3120		/* Chip has not been configured yet, so let it rip. */
3121		if (ql_init_misc_registers(qdev)) {
3122			status = -1;
3123			goto out;
3124		}
3125
3126		value = qdev->nvram_data.tcpMaxWindowSize;
3127		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3128
3129		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3130
3131		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3132				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3133				 * 2) << 13)) {
3134			status = -1;
3135			goto out;
3136		}
3137		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3138		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3139				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3140				     16) | (INTERNAL_CHIP_SD |
3141					    INTERNAL_CHIP_WE)));
3142		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3143	}
3144
3145	if (qdev->mac_index)
3146		ql_write_page0_reg(qdev,
3147				   &port_regs->mac1MaxFrameLengthReg,
3148				   qdev->max_frame_size);
3149	else
3150		ql_write_page0_reg(qdev,
3151				   &port_regs->mac0MaxFrameLengthReg,
3152				   qdev->max_frame_size);
3153
3154	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3155			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3156			 2) << 7)) {
3157		status = -1;
3158		goto out;
3159	}
3160
3161	PHY_Setup(qdev);
3162	ql_init_scan_mode(qdev);
3163	ql_get_phy_owner(qdev);
3164
3165	/* Load the MAC Configuration */
3166
3167	/* Program lower 32 bits of the MAC address */
3168	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3169			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3170	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3171			   ((qdev->ndev->dev_addr[2] << 24)
3172			    | (qdev->ndev->dev_addr[3] << 16)
3173			    | (qdev->ndev->dev_addr[4] << 8)
3174			    | qdev->ndev->dev_addr[5]));
3175
3176	/* Program top 16 bits of the MAC address */
3177	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3178			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3179	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3180			   ((qdev->ndev->dev_addr[0] << 8)
3181			    | qdev->ndev->dev_addr[1]));
3182
3183	/* Enable Primary MAC */
3184	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3185			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3186			    MAC_ADDR_INDIRECT_PTR_REG_PE));
3187
3188	/* Clear Primary and Secondary IP addresses */
3189	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3190			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3191			    (qdev->mac_index << 2)));
3192	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3193
3194	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3195			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3196			    ((qdev->mac_index << 2) + 1)));
3197	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3198
3199	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3200
3201	/* Indicate Configuration Complete */
3202	ql_write_page0_reg(qdev,
3203			   &port_regs->portControl,
3204			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3205
3206	do {
3207		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3208		if (value & PORT_STATUS_IC)
3209			break;
3210		spin_unlock_irq(&qdev->hw_lock);
3211		msleep(500);
3212		spin_lock_irq(&qdev->hw_lock);
3213	} while (--delay);
3214
3215	if (delay == 0) {
3216		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3217		status = -1;
3218		goto out;
3219	}
3220
3221	/* Enable Ethernet Function */
3222	if (qdev->device_id == QL3032_DEVICE_ID) {
3223		value =
3224		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3225		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3226			QL3032_PORT_CONTROL_ET);
3227		ql_write_page0_reg(qdev, &port_regs->functionControl,
3228				   ((value << 16) | value));
3229	} else {
3230		value =
3231		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3232		     PORT_CONTROL_HH);
3233		ql_write_page0_reg(qdev, &port_regs->portControl,
3234				   ((value << 16) | value));
3235	}
3236
3237
3238out:
3239	return status;
3240}
3241
3242/*
3243 * Caller holds hw_lock.
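 *
 * Summary of the reset sequence below: issue a soft reset (ISP_CONTROL_SR)
 * and poll for up to ~5 seconds for the firmware to clear it; clear a
 * latched Network Reset Interrupt if present, and fall back to a Force
 * Soft Reset (ISP_CONTROL_FSR) if the soft reset never completes.
 * Returns non-zero on timeout.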
3244 */
3245static int ql_adapter_reset(struct ql3_adapter *qdev)
3246{
3247	struct ql3xxx_port_registers __iomem *port_regs =
3248		qdev->mem_map_registers;
3249	int status = 0;
3250	u16 value;
3251	int max_wait_time;
3252
3253	set_bit(QL_RESET_ACTIVE, &qdev->flags);
3254	clear_bit(QL_RESET_DONE, &qdev->flags);
3255
3256	/*
3257	 * Issue soft reset to chip.
3258	 */
3259	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3260	ql_write_common_reg(qdev,
3261			    &port_regs->CommonRegs.ispControlStatus,
3262			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3263
3264	/* Wait up to 5 seconds for the reset to complete. */
3265	netdev_printk(KERN_DEBUG, qdev->ndev,
3266		      "Waiting up to 5 seconds for reset to complete\n");
3267
3268	/* Wait until the firmware tells us the Soft Reset is done */
3269	max_wait_time = 5;
3270	do {
3271		value =
3272		    ql_read_common_reg(qdev,
3273				       &port_regs->CommonRegs.ispControlStatus);
3274		if ((value & ISP_CONTROL_SR) == 0)
3275			break;
3276
3277		mdelay(1000);
3278	} while ((--max_wait_time));
3279
3280	/*
3281	 * Also, make sure that the Network Reset Interrupt bit has been
3282	 * cleared after the soft reset has taken place.
3283	 */
3284	value =
3285	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3286	if (value & ISP_CONTROL_RI) {
3287		netdev_printk(KERN_DEBUG, qdev->ndev,
3288			      "clearing RI after reset\n");
3289		ql_write_common_reg(qdev,
3290				    &port_regs->CommonRegs.
3291				    ispControlStatus,
3292				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3293	}
3294
3295	if (max_wait_time == 0) {
3296		/* Issue Force Soft Reset */
3297		ql_write_common_reg(qdev,
3298				    &port_regs->CommonRegs.
3299				    ispControlStatus,
3300				    ((ISP_CONTROL_FSR << 16) |
3301				     ISP_CONTROL_FSR));
3302		/*
3303		 * Wait until the firmware tells us the Force Soft Reset is
3304		 * done
3305		 */
3306		max_wait_time = 5;
3307		do {
3308			value = ql_read_common_reg(qdev,
3309						   &port_regs->CommonRegs.
3310						   ispControlStatus);
3311			if ((value & ISP_CONTROL_FSR) == 0)
3312				break;
3313			mdelay(1000);
3314		} while ((--max_wait_time));
3315	}
3316	if (max_wait_time == 0)
3317		status = 1;
3318
3319	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3320	set_bit(QL_RESET_DONE, &qdev->flags);
3321	return status;
3322}
3323
3324static void ql_set_mac_info(struct ql3_adapter *qdev)
3325{
3326	struct ql3xxx_port_registers __iomem *port_regs =
3327		qdev->mem_map_registers;
3328	u32 value, port_status;
3329	u8 func_number;
3330
3331	/* Get the function number */
3332	value =
3333	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3334	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3335	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3336	switch (value & ISP_CONTROL_FN_MASK) {
3337	case ISP_CONTROL_FN0_NET:
3338		qdev->mac_index = 0;
3339		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3340		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3341		qdev->PHYAddr = PORT0_PHY_ADDRESS;
3342		if (port_status & PORT_STATUS_SM0)
3343			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3344		else
3345			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3346		break;
3347
3348	case ISP_CONTROL_FN1_NET:
3349		qdev->mac_index = 1;
3350		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3351		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3352		qdev->PHYAddr = PORT1_PHY_ADDRESS;
3353		if (port_status & PORT_STATUS_SM1)
3354			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3355		else
3356			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3357		break;
3358
3359	case ISP_CONTROL_FN0_SCSI:
3360	case ISP_CONTROL_FN1_SCSI:
3361	default:
3362		netdev_printk(KERN_DEBUG, qdev->ndev,
3363			      "Invalid function number, ispControlStatus = 0x%x\n",
3364			      value);
3365		break;
3366	}
3367	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3368}
3369
3370static void ql_display_dev_info(struct net_device *ndev)
3371{
3372	struct ql3_adapter *qdev = netdev_priv(ndev);
3373	struct pci_dev *pdev = qdev->pdev;
3374
3375	netdev_info(ndev,
3376		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3377		    DRV_NAME, qdev->index, qdev->chip_rev_id,
3378		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3379		    qdev->pci_slot);
3380	netdev_info(ndev, "%s Interface\n",
3381		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3382
3383	/*
3384	 * Print PCI bus width/type.
3385	 */
3386	netdev_info(ndev, "Bus interface is %s %s\n",
3387		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3388		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
3389
3390	netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
3391		    qdev->mem_map_registers);
3392	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3393
3394	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3395}
3396
3397static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3398{
3399	struct net_device *ndev = qdev->ndev;
3400	int retval = 0;
3401
3402	netif_stop_queue(ndev);
3403	netif_carrier_off(ndev);
3404
3405	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3406	clear_bit(QL_LINK_MASTER, &qdev->flags);
3407
3408	ql_disable_interrupts(qdev);
3409
3410	free_irq(qdev->pdev->irq, ndev);
3411
3412	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3413		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3414		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3415		pci_disable_msi(qdev->pdev);
3416	}
3417
3418	del_timer_sync(&qdev->adapter_timer);
3419
3420	napi_disable(&qdev->napi);
3421
3422	if (do_reset) {
3423		int soft_reset;
3424		unsigned long hw_flags;
3425
3426		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3427		if (ql_wait_for_drvr_lock(qdev)) {
3428			soft_reset = ql_adapter_reset(qdev);
3429			if (soft_reset) {
3430				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3431					   qdev->index);
3432			}
3433			netdev_err(ndev,
3434				   "Releasing driver lock via chip reset\n");
3435		} else {
3436			netdev_err(ndev,
3437				   "Could not acquire driver lock to do reset!\n");
3438			retval = -1;
3439		}
3440		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3441	}
3442	ql_free_mem_resources(qdev);
3443	return retval;
3444}
3445
3446static int ql_adapter_up(struct ql3_adapter *qdev)
3447{
3448	struct net_device *ndev = qdev->ndev;
3449	int err;
3450	unsigned long irq_flags = IRQF_SHARED;
3451	unsigned long hw_flags;
3452
3453	if (ql_alloc_mem_resources(qdev)) {
3454		netdev_err(ndev, "Unable to allocate buffers\n");
3455		return -ENOMEM;
3456	}
3457
3458	if (qdev->msi) {
3459		if (pci_enable_msi(qdev->pdev)) {
3460			netdev_err(ndev,
3461				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
3462			qdev->msi = 0;
3463		} else {
3464			netdev_info(ndev, "MSI Enabled...\n");
3465			set_bit(QL_MSI_ENABLED, &qdev->flags);
3466			irq_flags &= ~IRQF_SHARED;
3467		}
3468	}
3469
3470	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3471			  irq_flags, ndev->name, ndev);
3472	if (err) {
3473		netdev_err(ndev,
3474			   "Failed to reserve interrupt %d - already in use\n",
3475			   qdev->pdev->irq);
3476		goto err_irq;
3477	}
3478
3479	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3480
3481	err = ql_wait_for_drvr_lock(qdev);
3482	if (err) {
3483		err = ql_adapter_initialize(qdev);
3484		if (err) {
3485			netdev_err(ndev, "Unable to initialize adapter\n");
3486			goto err_init;
3487		}
3488		netdev_err(ndev, "Releasing driver lock\n");
3489		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3490	} else {
3491		netdev_err(ndev, "Could not acquire driver lock\n");
3492		goto err_lock;
3493	}
3494
3495	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3496
3497	set_bit(QL_ADAPTER_UP, &qdev->flags);
3498
3499	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3500
3501	napi_enable(&qdev->napi);
3502	ql_enable_interrupts(qdev);
3503	return 0;
3504
3505err_init:
3506	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3507err_lock:
3508	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3509	free_irq(qdev->pdev->irq, ndev);
3510err_irq:
3511	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3512		netdev_info(ndev, "calling pci_disable_msi()\n");
3513		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3514		pci_disable_msi(qdev->pdev);
3515	}
3516	return err;
3517}
3518
3519static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3520{
3521	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3522		netdev_err(qdev->ndev,
3523			   "Driver up/down cycle failed, closing device\n");
3524		rtnl_lock();
3525		dev_close(qdev->ndev);
3526		rtnl_unlock();
3527		return -1;
3528	}
3529	return 0;
3530}
3531
3532static int ql3xxx_close(struct net_device *ndev)
3533{
3534	struct ql3_adapter *qdev = netdev_priv(ndev);
3535
3536	/*
3537	 * Wait for device to recover from a reset.
3538	 * (Rarely happens, but possible.)
3539	 */
3540	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3541		msleep(50);
3542
3543	ql_adapter_down(qdev, QL_DO_RESET);
3544	return 0;
3545}
3546
3547static int ql3xxx_open(struct net_device *ndev)
3548{
3549	struct ql3_adapter *qdev = netdev_priv(ndev);
3550	return ql_adapter_up(qdev);
3551}
3552
3553static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3554{
3555	struct ql3_adapter *qdev = netdev_priv(ndev);
3556	struct ql3xxx_port_registers __iomem *port_regs =
3557			qdev->mem_map_registers;
3558	struct sockaddr *addr = p;
3559	unsigned long hw_flags;
3560
3561	if (netif_running(ndev))
3562		return -EBUSY;
3563
3564	if (!is_valid_ether_addr(addr->sa_data))
3565		return -EADDRNOTAVAIL;
3566
3567	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3568
3569	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3570	/* Program lower 32 bits of the MAC address */
3571	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3572			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3573	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3574			   ((ndev->dev_addr[2] << 24) | (ndev->
3575							 dev_addr[3] << 16) |
3576			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3577
3578	/* Program top 16 bits of the MAC address */
3579	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3580			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3581	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3582			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3583	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3584
3585	return 0;
3586}
3587
3588static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
3589{
3590	struct ql3_adapter *qdev = netdev_priv(ndev);
3591
3592	netdev_err(ndev, "Resetting...\n");
3593	/*
3594	 * Stop the queues, we've got a problem.
3595	 */
3596	netif_stop_queue(ndev);
3597
3598	/*
3599	 * Wake up the worker to process this event.
3600	 */
3601	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3602}
3603
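/*
 * Reset worker, queued by the ISR when it sees a chip fatal error or a
 * reset issued by the other function: free any in-flight transmit skbs,
 * clear the Network Reset Interrupt, wait for the soft reset to finish,
 * then cycle the adapter (down/up) to return to a working state.
 */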
3604static void ql_reset_work(struct work_struct *work)
3605{
3606	struct ql3_adapter *qdev =
3607		container_of(work, struct ql3_adapter, reset_work.work);
3608	struct net_device *ndev = qdev->ndev;
3609	u32 value;
3610	struct ql_tx_buf_cb *tx_cb;
3611	int max_wait_time, i;
3612	struct ql3xxx_port_registers __iomem *port_regs =
3613		qdev->mem_map_registers;
3614	unsigned long hw_flags;
3615
3616	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
3617		clear_bit(QL_LINK_MASTER, &qdev->flags);
3618
3619		/*
3620		 * Loop through the active list and return the skb.
3621		 */
3622		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3623			int j;
3624			tx_cb = &qdev->tx_buf[i];
3625			if (tx_cb->skb) {
3626				netdev_printk(KERN_DEBUG, ndev,
3627					      "Freeing lost SKB\n");
3628				dma_unmap_single(&qdev->pdev->dev,
3629						 dma_unmap_addr(&tx_cb->map[0], mapaddr),
3630						 dma_unmap_len(&tx_cb->map[0], maplen),
3631						 DMA_TO_DEVICE);
3632				for (j = 1; j < tx_cb->seg_count; j++) {
3633					dma_unmap_page(&qdev->pdev->dev,
3634						       dma_unmap_addr(&tx_cb->map[j], mapaddr),
3635						       dma_unmap_len(&tx_cb->map[j], maplen),
3636						       DMA_TO_DEVICE);
3637				}
3638				dev_kfree_skb(tx_cb->skb);
3639				tx_cb->skb = NULL;
3640			}
3641		}
3642
3643		netdev_err(ndev, "Clearing NRI after reset\n");
3644		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3645		ql_write_common_reg(qdev,
3646				    &port_regs->CommonRegs.
3647				    ispControlStatus,
3648				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3649		/*
3650		 * Wait for the Soft Reset to complete.
3651		 */
3652		max_wait_time = 10;
3653		do {
3654			value = ql_read_common_reg(qdev,
3655						   &port_regs->CommonRegs.
3657						   ispControlStatus);
3658			if ((value & ISP_CONTROL_SR) == 0) {
3659				netdev_printk(KERN_DEBUG, ndev,
3660					      "reset completed\n");
3661				break;
3662			}
3663
3664			if (value & ISP_CONTROL_RI) {
3665				netdev_printk(KERN_DEBUG, ndev,
3666					      "clearing NRI after reset\n");
3667				ql_write_common_reg(qdev,
3668						    &port_regs->
3669						    CommonRegs.
3670						    ispControlStatus,
3671						    ((ISP_CONTROL_RI <<
3672						      16) | ISP_CONTROL_RI));
3673			}
3674
3675			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3676			ssleep(1);
3677			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3678		} while (--max_wait_time);
3679		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3680
3681		if (value & ISP_CONTROL_SR) {
3682
3683			/*
3684			 * Set the reset flags and clear the board again.
3685			 * Nothing else to do...
3686			 */
3687			netdev_err(ndev,
3688				   "Timed out waiting for reset to complete\n");
3689			netdev_err(ndev, "Do a reset\n");
3690			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3691			clear_bit(QL_RESET_START, &qdev->flags);
3692			ql_cycle_adapter(qdev, QL_DO_RESET);
3693			return;
3694		}
3695
3696		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3697		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
3698		clear_bit(QL_RESET_START, &qdev->flags);
3699		ql_cycle_adapter(qdev, QL_NO_RESET);
3700	}
3701}
3702
3703static void ql_tx_timeout_work(struct work_struct *work)
3704{
3705	struct ql3_adapter *qdev =
3706		container_of(work, struct ql3_adapter, tx_timeout_work.work);
3707
3708	ql_cycle_adapter(qdev, QL_DO_RESET);
3709}
3710
3711static void ql_get_board_info(struct ql3_adapter *qdev)
3712{
3713	struct ql3xxx_port_registers __iomem *port_regs =
3714		qdev->mem_map_registers;
3715	u32 value;
3716
3717	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3718
3719	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3720	if (value & PORT_STATUS_64)
3721		qdev->pci_width = 64;
3722	else
3723		qdev->pci_width = 32;
3724	if (value & PORT_STATUS_X)
3725		qdev->pci_x = 1;
3726	else
3727		qdev->pci_x = 0;
3728	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3729}
3730
3731static void ql3xxx_timer(struct timer_list *t)
3732{
3733	struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
3734	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3735}
3736
3737static const struct net_device_ops ql3xxx_netdev_ops = {
3738	.ndo_open		= ql3xxx_open,
3739	.ndo_start_xmit		= ql3xxx_send,
3740	.ndo_stop		= ql3xxx_close,
3741	.ndo_validate_addr	= eth_validate_addr,
3742	.ndo_set_mac_address	= ql3xxx_set_mac_address,
3743	.ndo_tx_timeout		= ql3xxx_tx_timeout,
3744};
3745
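/*
 * PCI probe: enable and map the device, choose a DMA mask, pull the MAC
 * address and MTU from NVRAM, register the net_device and set up the
 * reset/tx-timeout/link-state work items and the adapter timer.
 */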
3746static int ql3xxx_probe(struct pci_dev *pdev,
3747			const struct pci_device_id *pci_entry)
3748{
3749	struct net_device *ndev = NULL;
3750	struct ql3_adapter *qdev = NULL;
3751	static int cards_found;
3752	int pci_using_dac, err;
3753
3754	err = pci_enable_device(pdev);
3755	if (err) {
3756		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
3757		goto err_out;
3758	}
3759
3760	err = pci_request_regions(pdev, DRV_NAME);
3761	if (err) {
3762		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
3763		goto err_out_disable_pdev;
3764	}
3765
3766	pci_set_master(pdev);
3767
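	/* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails. */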
3768	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
3769		pci_using_dac = 1;
3770	else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
3771		pci_using_dac = 0;
3772
3773	if (err) {
3774		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
3775		goto err_out_free_regions;
3776	}
3777
3778	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3779	if (!ndev) {
3780		err = -ENOMEM;
3781		goto err_out_free_regions;
3782	}
3783
3784	SET_NETDEV_DEV(ndev, &pdev->dev);
3785
3786	pci_set_drvdata(pdev, ndev);
3787
3788	qdev = netdev_priv(ndev);
3789	qdev->index = cards_found;
3790	qdev->ndev = ndev;
3791	qdev->pdev = pdev;
3792	qdev->device_id = pci_entry->device;
3793	qdev->port_link_state = LS_DOWN;
3794	if (msi)
3795		qdev->msi = 1;
3796
3797	qdev->msg_enable = netif_msg_init(debug, default_msg);
3798
3799	if (pci_using_dac)
3800		ndev->features |= NETIF_F_HIGHDMA;
3801	if (qdev->device_id == QL3032_DEVICE_ID)
3802		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3803
3804	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3805	if (!qdev->mem_map_registers) {
3806		pr_err("%s: cannot map device registers\n", pci_name(pdev));
3807		err = -EIO;
3808		goto err_out_free_ndev;
3809	}
3810
3811	spin_lock_init(&qdev->adapter_lock);
3812	spin_lock_init(&qdev->hw_lock);
3813
3814	/* Set driver entry points */
3815	ndev->netdev_ops = &ql3xxx_netdev_ops;
3816	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
3817	ndev->watchdog_timeo = 5 * HZ;
3818
3819	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
3820
3821	ndev->irq = pdev->irq;
3822
3823	/* make sure the EEPROM is good */
3824	if (ql_get_nvram_params(qdev)) {
3825		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
3826			 __func__, qdev->index);
3827		err = -EIO;
3828		goto err_out_iounmap;
3829	}
3830
3831	ql_set_mac_info(qdev);
3832
3833	/* Validate and set parameters */
3834	if (qdev->mac_index) {
 3835		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3836		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
3837	} else {
 3838		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3839		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
3840	}
3841
3842	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3843
3844	/* Record PCI bus information. */
3845	ql_get_board_info(qdev);
3846
3847	/*
3848	 * Set the Maximum Memory Read Byte Count value. We do this to handle
3849	 * jumbo frames.
3850	 */
3851	if (qdev->pci_x)
3852		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3853
3854	err = register_netdev(ndev);
3855	if (err) {
3856		pr_err("%s: cannot register net device\n", pci_name(pdev));
3857		goto err_out_iounmap;
3858	}
3859
3860	/* we're going to reset, so assume we have no link for now */
3861
3862	netif_carrier_off(ndev);
3863	netif_stop_queue(ndev);
3864
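	/*
	 * All deferred work for this adapter runs on one single-threaded
	 * workqueue, so the handlers below never execute concurrently.
	 */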
3865	qdev->workqueue = create_singlethread_workqueue(ndev->name);
3866	if (!qdev->workqueue) {
3867		unregister_netdev(ndev);
3868		err = -ENOMEM;
3869		goto err_out_iounmap;
3870	}
3871
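	/* Deferred handlers: adapter reset, tx watchdog recovery, link state. */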
3872	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3873	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3874	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
3875
3876	timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
3877	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
3878
3879	if (!cards_found) {
3880		pr_alert("%s\n", DRV_STRING);
3881		pr_alert("Driver name: %s, Version: %s\n",
3882			 DRV_NAME, DRV_VERSION);
3883	}
3884	ql_display_dev_info(ndev);
3885
3886	cards_found++;
3887	return 0;
3888
3889err_out_iounmap:
3890	iounmap(qdev->mem_map_registers);
3891err_out_free_ndev:
3892	free_netdev(ndev);
3893err_out_free_regions:
3894	pci_release_regions(pdev);
3895err_out_disable_pdev:
3896	pci_disable_device(pdev);
3897err_out:
3898	return err;
3899}
3900
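/*
 * PCI remove: unregister the net_device, mask chip interrupts, flush and
 * destroy the driver workqueue, then release the mapped registers and
 * PCI resources.
 */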
3901static void ql3xxx_remove(struct pci_dev *pdev)
3902{
3903	struct net_device *ndev = pci_get_drvdata(pdev);
3904	struct ql3_adapter *qdev = netdev_priv(ndev);
3905
3906	unregister_netdev(ndev);
3907
3908	ql_disable_interrupts(qdev);
3909
3910	if (qdev->workqueue) {
3911		cancel_delayed_work(&qdev->reset_work);
3912		cancel_delayed_work(&qdev->tx_timeout_work);
3913		destroy_workqueue(qdev->workqueue);
3914		qdev->workqueue = NULL;
3915	}
3916
3917	iounmap(qdev->mem_map_registers);
3918	pci_release_regions(pdev);
3919	free_netdev(ndev);
3920}
3921
3922static struct pci_driver ql3xxx_driver = {
3923
3924	.name = DRV_NAME,
3925	.id_table = ql3xxx_pci_tbl,
3926	.probe = ql3xxx_probe,
3927	.remove = ql3xxx_remove,
3928};
3929
3930module_pci_driver(ql3xxx_driver);