Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1 /***************************************************************************
   2 *
   3 * Copyright (C) 2007,2008  SMSC
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License
   7 * as published by the Free Software Foundation; either version 2
   8 * of the License, or (at your option) any later version.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, see <http://www.gnu.org/licenses/>.
  17 *
  18 ***************************************************************************
  19 */
  20
  21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22
  23#include <linux/interrupt.h>
  24#include <linux/kernel.h>
  25#include <linux/netdevice.h>
  26#include <linux/phy.h>
  27#include <linux/pci.h>
  28#include <linux/if_vlan.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/crc32.h>
  31#include <linux/slab.h>
  32#include <linux/module.h>
  33#include <asm/unaligned.h>
  34#include "smsc9420.h"
  35
  36#define DRV_NAME		"smsc9420"
  37#define DRV_MDIONAME		"smsc9420-mdio"
  38#define DRV_DESCRIPTION		"SMSC LAN9420 driver"
  39#define DRV_VERSION		"1.01"
  40
  41MODULE_LICENSE("GPL");
  42MODULE_VERSION(DRV_VERSION);
  43
/* In-memory DMA descriptor shared with the LAN9420 DMA engine; the layout
 * must match the hardware descriptor format exactly. */
struct smsc9420_dma_desc {
	u32 status;	/* ownership (OWN bit) plus completion/error flags */
	u32 length;	/* buffer length and per-descriptor control bits */
	u32 buffer1;	/* bus address of the packet data buffer */
	u32 buffer2;	/* second buffer address; this driver leaves it 0 */
};
  50
/* Driver-side bookkeeping for one ring slot: the skb handed to/from the
 * hardware and its streaming DMA mapping (0 when unmapped). */
struct smsc9420_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
  55
/* Per-adapter driver state, stored as netdev_priv() of the net_device. */
struct smsc9420_pdata {
	void __iomem *ioaddr;	/* mapped PCI register window */
	struct pci_dev *pdev;
	struct net_device *dev;

	/* descriptor rings plus per-slot skb/mapping bookkeeping */
	struct smsc9420_dma_desc *rx_ring;
	struct smsc9420_dma_desc *tx_ring;
	struct smsc9420_ring_info *tx_buffers;
	struct smsc9420_ring_info *rx_buffers;
	dma_addr_t rx_dma_addr;	/* bus address of rx_ring */
	dma_addr_t tx_dma_addr;	/* bus address of tx_ring */
	int tx_ring_head, tx_ring_tail;
	int rx_ring_head, rx_ring_tail;

	spinlock_t int_lock;	/* serializes INT_CFG/INT_CTL register updates */
	spinlock_t phy_lock;	/* serializes MII_ACCESS/MII_DATA sequences */

	struct napi_struct napi;

	/* set by the ISR when the software test interrupt fires */
	bool software_irq_signal;
	bool rx_csum;		/* hardware RX checksum trailer enabled */
	u32 msg_enable;		/* netif_msg_* verbosity bitmask */

	struct mii_bus *mii_bus;
	int last_duplex;	/* cached to detect duplex changes */
	int last_carrier;	/* cached to detect carrier changes */
};
  83
/* PCI IDs this driver binds to (the SMSC LAN9420), zero-terminated. */
static const struct pci_device_id smsc9420_id_table[] = {
	{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, smsc9420_id_table);
  90
  91#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
  92
  93static uint smsc_debug;
  94static uint debug = -1;
  95module_param(debug, uint, 0);
  96MODULE_PARM_DESC(debug, "debug level");
  97
/* Read a 32-bit device register at byte offset @offset. */
static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
{
	return ioread32(pd->ioaddr + offset);
}
 102
/* Write a 32-bit device register at byte offset @offset. */
static inline void
smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
{
	iowrite32(value, pd->ioaddr + offset);
}
 108
/* Force posted PCI writes out to the device by reading a harmless
 * register (ID_REV). */
static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
{
	/* to ensure PCI write completion, we must perform a PCI read */
	smsc9420_reg_read(pd, ID_REV);
}
 114
 115static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
 116{
 117	struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
 118	unsigned long flags;
 119	u32 addr;
 120	int i, reg = -EIO;
 121
 122	spin_lock_irqsave(&pd->phy_lock, flags);
 123
 124	/*  confirm MII not busy */
 125	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
 126		netif_warn(pd, drv, pd->dev, "MII is busy???\n");
 127		goto out;
 128	}
 129
 130	/* set the address, index & direction (read from PHY) */
 131	addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
 132		MII_ACCESS_MII_READ_;
 133	smsc9420_reg_write(pd, MII_ACCESS, addr);
 134
 135	/* wait for read to complete with 50us timeout */
 136	for (i = 0; i < 5; i++) {
 137		if (!(smsc9420_reg_read(pd, MII_ACCESS) &
 138			MII_ACCESS_MII_BUSY_)) {
 139			reg = (u16)smsc9420_reg_read(pd, MII_DATA);
 140			goto out;
 141		}
 142		udelay(10);
 143	}
 144
 145	netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
 146
 147out:
 148	spin_unlock_irqrestore(&pd->phy_lock, flags);
 149	return reg;
 150}
 151
 152static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
 153			   u16 val)
 154{
 155	struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
 156	unsigned long flags;
 157	u32 addr;
 158	int i, reg = -EIO;
 159
 160	spin_lock_irqsave(&pd->phy_lock, flags);
 161
 162	/* confirm MII not busy */
 163	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
 164		netif_warn(pd, drv, pd->dev, "MII is busy???\n");
 165		goto out;
 166	}
 167
 168	/* put the data to write in the MAC */
 169	smsc9420_reg_write(pd, MII_DATA, (u32)val);
 170
 171	/* set the address, index & direction (write to PHY) */
 172	addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
 173		MII_ACCESS_MII_WRITE_;
 174	smsc9420_reg_write(pd, MII_ACCESS, addr);
 175
 176	/* wait for write to complete with 50us timeout */
 177	for (i = 0; i < 5; i++) {
 178		if (!(smsc9420_reg_read(pd, MII_ACCESS) &
 179			MII_ACCESS_MII_BUSY_)) {
 180			reg = 0;
 181			goto out;
 182		}
 183		udelay(10);
 184	}
 185
 186	netif_warn(pd, drv, pd->dev, "MII busy timeout!\n");
 187
 188out:
 189	spin_unlock_irqrestore(&pd->phy_lock, flags);
 190	return reg;
 191}
 192
/* Returns hash bit number for given MAC address
 * (top 6 bits of the Ethernet CRC, used to index the 64-bit hash filter)
 * Example:
 * 01 00 5E 00 00 01 -> returns bit number 31 */
static u32 smsc9420_hash(u8 addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f;
}
 200
/* Ask the device to reload its configuration from the external EEPROM.
 * Returns 0 on success, -EIO if the EEPROM controller is already busy
 * or the reload does not complete within the polling budget. */
static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
{
	int timeout = 100000;

	BUG_ON(!pd);

	if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
		netif_dbg(pd, drv, pd->dev, "%s: Eeprom busy\n", __func__);
		return -EIO;
	}

	/* kick off the reload command */
	smsc9420_reg_write(pd, E2P_CMD,
		(E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_));

	/* poll until the BUSY bit clears */
	do {
		udelay(10);
		if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_))
			return 0;
	} while (timeout--);

	netif_warn(pd, drv, pd->dev, "%s: Eeprom timed out\n", __func__);
	return -EIO;
}
 224
 225/* Standard ioctls for mii-tool */
 226static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 227{
 228	if (!netif_running(dev) || !dev->phydev)
 229		return -EINVAL;
 230
 231	return phy_mii_ioctl(dev->phydev, ifr, cmd);
 232}
 233
 234static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
 235					 struct ethtool_drvinfo *drvinfo)
 236{
 237	struct smsc9420_pdata *pd = netdev_priv(netdev);
 238
 239	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
 240	strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
 241		sizeof(drvinfo->bus_info));
 242	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
 243}
 244
/* ethtool: return the current netif_msg verbosity bitmask. */
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
{
	struct smsc9420_pdata *pd = netdev_priv(netdev);
	return pd->msg_enable;
}
 250
/* ethtool: set the netif_msg verbosity bitmask. */
static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
{
	struct smsc9420_pdata *pd = netdev_priv(netdev);
	pd->msg_enable = data;
}
 256
/* ethtool: size of the register dump produced by getregs() below. */
static int smsc9420_ethtool_getregslen(struct net_device *dev)
{
	/* all smsc9420 registers plus all phy registers */
	return 0x100 + (32 * sizeof(u32));
}
 262
/* ethtool: dump all device registers (0x00..0xFC) followed by the 32 PHY
 * registers into @buf; getregslen() above sizes the buffer accordingly. */
static void
smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phy_dev = dev->phydev;
	unsigned int i, j = 0;
	u32 *data = buf;

	regs->version = smsc9420_reg_read(pd, ID_REV);
	for (i = 0; i < 0x100; i += (sizeof(u32)))
		data[j++] = smsc9420_reg_read(pd, i);

	// cannot read phy registers if the net device is down
	if (!phy_dev)
		return;

	for (i = 0; i <= 31; i++)
		data[j++] = smsc9420_mii_read(phy_dev->mdio.bus,
					      phy_dev->mdio.addr, i);
}
 284
/* Enable host access to the external EEPROM by clearing GPIO_CFG_EEPR_EN_
 * (presumably switching the shared pins to EEPROM mode — see datasheet),
 * then wait briefly for the hardware to settle. */
static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd)
{
	unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG);
	temp &= ~GPIO_CFG_EEPR_EN_;
	smsc9420_reg_write(pd, GPIO_CFG, temp);
	msleep(1);
}
 292
/* Issue one EEPROM controller command (@op holds the opcode and address
 * bits) and wait for completion.
 * Returns 0 on success, -EBUSY if the controller was already busy,
 * -EAGAIN on timeout, -EINVAL if the hardware reports an EEPROM error. */
static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
{
	int timeout = 100;
	u32 e2cmd;

	netif_dbg(pd, hw, pd->dev, "op 0x%08x\n", op);
	if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
		netif_warn(pd, hw, pd->dev, "Busy at start\n");
		return -EBUSY;
	}

	/* setting BUSY kicks off the command */
	e2cmd = op | E2P_CMD_EPC_BUSY_;
	smsc9420_reg_write(pd, E2P_CMD, e2cmd);

	/* poll up to ~100ms for the BUSY bit to clear */
	do {
		msleep(1);
		e2cmd = smsc9420_reg_read(pd, E2P_CMD);
	} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));

	if (!timeout) {
		netif_info(pd, hw, pd->dev, "TIMED OUT\n");
		return -EAGAIN;
	}

	if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
		netif_info(pd, hw, pd->dev,
			   "Error occurred during eeprom operation\n");
		return -EINVAL;
	}

	return 0;
}
 325
/* Read one EEPROM byte at @address.  Note the result is stored at
 * data[address], i.e. @data must be an array indexed by absolute EEPROM
 * address, not a single-byte output.  Returns 0 or a send_cmd() error. */
static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd,
					 u8 address, u8 *data)
{
	u32 op = E2P_CMD_EPC_CMD_READ_ | address;
	int ret;

	netif_dbg(pd, hw, pd->dev, "address 0x%x\n", address);
	ret = smsc9420_eeprom_send_cmd(pd, op);

	if (!ret)
		data[address] = smsc9420_reg_read(pd, E2P_DATA);

	return ret;
}
 340
/* Write one EEPROM byte at @address: the location is erased first, then
 * programmed with @data.  Returns 0 or a send_cmd() error. */
static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd,
					  u8 address, u8 data)
{
	u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
	int ret;

	netif_dbg(pd, hw, pd->dev, "address 0x%x, data 0x%x\n", address, data);
	ret = smsc9420_eeprom_send_cmd(pd, op);

	if (!ret) {
		/* erase succeeded; now program the new value */
		op = E2P_CMD_EPC_CMD_WRITE_ | address;
		smsc9420_reg_write(pd, E2P_DATA, (u32)data);
		ret = smsc9420_eeprom_send_cmd(pd, op);
	}

	return ret;
}
 358
/* ethtool: total size of the external EEPROM in bytes. */
static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev)
{
	return SMSC9420_EEPROM_SIZE;
}
 363
 364static int smsc9420_ethtool_get_eeprom(struct net_device *dev,
 365				       struct ethtool_eeprom *eeprom, u8 *data)
 366{
 367	struct smsc9420_pdata *pd = netdev_priv(dev);
 368	u8 eeprom_data[SMSC9420_EEPROM_SIZE];
 369	int len, i;
 370
 371	smsc9420_eeprom_enable_access(pd);
 372
 373	len = min(eeprom->len, SMSC9420_EEPROM_SIZE);
 374	for (i = 0; i < len; i++) {
 375		int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data);
 376		if (ret < 0) {
 377			eeprom->len = 0;
 378			return ret;
 379		}
 380	}
 381
 382	memcpy(data, &eeprom_data[eeprom->offset], len);
 383	eeprom->magic = SMSC9420_EEPROM_MAGIC;
 384	eeprom->len = len;
 385	return 0;
 386}
 387
/* ethtool: write a single byte at eeprom->offset.  The magic value must
 * match to guard against writes intended for a different device.  Write
 * access is enabled (EWEN) around the operation and disabled (EWDS)
 * afterwards. */
static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
				       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int ret;

	if (eeprom->magic != SMSC9420_EEPROM_MAGIC)
		return -EINVAL;

	smsc9420_eeprom_enable_access(pd);
	smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_);
	ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data);
	smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_);

	/* Single byte write, according to man page */
	eeprom->len = 1;

	return ret;
}
 407
/* ethtool entry points; link settings and nway_reset are delegated to
 * the generic PHY helpers. */
static const struct ethtool_ops smsc9420_ethtool_ops = {
	.get_drvinfo = smsc9420_ethtool_get_drvinfo,
	.get_msglevel = smsc9420_ethtool_get_msglevel,
	.set_msglevel = smsc9420_ethtool_set_msglevel,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = smsc9420_ethtool_get_eeprom_len,
	.get_eeprom = smsc9420_ethtool_get_eeprom,
	.set_eeprom = smsc9420_ethtool_set_eeprom,
	.get_regs_len = smsc9420_ethtool_getregslen,
	.get_regs = smsc9420_ethtool_getregs,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
 423
 424/* Sets the device MAC address to dev_addr */
 425static void smsc9420_set_mac_address(struct net_device *dev)
 426{
 427	struct smsc9420_pdata *pd = netdev_priv(dev);
 428	u8 *dev_addr = dev->dev_addr;
 429	u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
 430	u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
 431	    (dev_addr[1] << 8) | dev_addr[0];
 432
 433	smsc9420_reg_write(pd, ADDRH, mac_high16);
 434	smsc9420_reg_write(pd, ADDRL, mac_low32);
 435}
 436
/* Establish a usable MAC address: prefer one already configured on the
 * net_device, fall back to the address the device holds (loaded from
 * EEPROM at reset, if present), and finally generate a random one. */
static void smsc9420_check_mac_address(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	/* Check if mac address has been specified when bringing interface up */
	if (is_valid_ether_addr(dev->dev_addr)) {
		smsc9420_set_mac_address(dev);
		netif_dbg(pd, probe, pd->dev,
			  "MAC Address is specified by configuration\n");
	} else {
		/* Try reading mac address from device. if EEPROM is present
		 * it will already have been set */
		u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
		u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
		/* unpack the registers in the same byte order that
		 * smsc9420_set_mac_address() packs them */
		dev->dev_addr[0] = (u8)(mac_low32);
		dev->dev_addr[1] = (u8)(mac_low32 >> 8);
		dev->dev_addr[2] = (u8)(mac_low32 >> 16);
		dev->dev_addr[3] = (u8)(mac_low32 >> 24);
		dev->dev_addr[4] = (u8)(mac_high16);
		dev->dev_addr[5] = (u8)(mac_high16 >> 8);

		if (is_valid_ether_addr(dev->dev_addr)) {
			/* eeprom values are valid  so use them */
			netif_dbg(pd, probe, pd->dev,
				  "Mac Address is read from EEPROM\n");
		} else {
			/* eeprom values are invalid, generate random MAC */
			eth_hw_addr_random(dev);
			smsc9420_set_mac_address(dev);
			netif_dbg(pd, probe, pd->dev,
				  "MAC Address is set to random\n");
		}
	}
}
 471
/* Stop the transmit path: halt the TX DMA engine, wait for it to report
 * stopped, mask TX DMA interrupts and disable the MAC transmitter. */
static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
{
	u32 dmac_control, mac_cr, dma_intr_ena;
	int timeout = 1000;

	/* disable TX DMAC */
	dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
	dmac_control &= (~DMAC_CONTROL_ST_);
	smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);

	/* Wait max 10ms for transmit process to stop */
	while (--timeout) {
		if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_)
			break;
		udelay(10);
	}

	if (!timeout)
		netif_warn(pd, ifdown, pd->dev, "TX DMAC failed to stop\n");

	/* ACK Tx DMAC stop bit */
	smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);

	/* mask TX DMAC interrupts */
	dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
	dma_intr_ena &= ~(DMAC_INTR_ENA_TX_);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
	smsc9420_pci_flush_write(pd);

	/* stop MAC TX */
	mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_);
	smsc9420_reg_write(pd, MAC_CR, mac_cr);
	smsc9420_pci_flush_write(pd);
}
 506
/* Release all TX ring resources: unmap and free any in-flight skbs,
 * clear the descriptors, free the bookkeeping array and reset the ring
 * indices.  Must only be called after TX DMA has been stopped. */
static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->tx_ring);

	if (!pd->tx_buffers)
		return;

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = pd->tx_buffers[i].skb;

		if (skb) {
			BUG_ON(!pd->tx_buffers[i].mapping);
			pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
		}

		pd->tx_ring[i].status = 0;
		pd->tx_ring[i].length = 0;
		pd->tx_ring[i].buffer1 = 0;
		pd->tx_ring[i].buffer2 = 0;
	}
	/* make the cleared descriptors visible before the ring is reused */
	wmb();

	kfree(pd->tx_buffers);
	pd->tx_buffers = NULL;

	pd->tx_ring_head = 0;
	pd->tx_ring_tail = 0;
}
 539
/* Release all RX ring resources: free queued skbs, unmap their DMA
 * buffers, clear the descriptors, free the bookkeeping array and reset
 * the ring indices.  Must only be called after RX DMA has been stopped. */
static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->rx_ring);

	if (!pd->rx_buffers)
		return;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (pd->rx_buffers[i].skb)
			dev_kfree_skb_any(pd->rx_buffers[i].skb);

		if (pd->rx_buffers[i].mapping)
			pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
				PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

		pd->rx_ring[i].status = 0;
		pd->rx_ring[i].length = 0;
		pd->rx_ring[i].buffer1 = 0;
		pd->rx_ring[i].buffer2 = 0;
	}
	/* make the cleared descriptors visible before the ring is reused */
	wmb();

	kfree(pd->rx_buffers);
	pd->rx_buffers = NULL;

	pd->rx_ring_head = 0;
	pd->rx_ring_tail = 0;
}
 570
/* Stop the receive path: mask RX DMA interrupts, disable the MAC
 * receiver before halting the DMA engine, then wait for the engine to
 * report stopped. */
static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
{
	int timeout = 1000;
	u32 mac_cr, dmac_control, dma_intr_ena;

	/* mask RX DMAC interrupts */
	dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
	dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
	smsc9420_pci_flush_write(pd);

	/* stop RX MAC prior to stoping DMA */
	mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_);
	smsc9420_reg_write(pd, MAC_CR, mac_cr);
	smsc9420_pci_flush_write(pd);

	/* stop RX DMAC */
	dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
	dmac_control &= (~DMAC_CONTROL_SR_);
	smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
	smsc9420_pci_flush_write(pd);

	/* wait up to 10ms for receive to stop */
	while (--timeout) {
		if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_)
			break;
		udelay(10);
	}

	if (!timeout)
		netif_warn(pd, ifdown, pd->dev,
			   "RX DMAC did not stop! timeout\n");

	/* ACK the Rx DMAC stop bit */
	smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
}
 607
/* Interrupt handler.  @dev_id is the struct smsc9420_pdata * passed to
 * request_irq() (the line is shared, so first verify the interrupt is
 * ours).  TX completions wake the queue directly; RX work is deferred
 * to NAPI with the RX DMA interrupt masked until the poll re-enables it.
 * The software test interrupt sets pd->software_irq_signal, which a
 * waiter elsewhere in the driver observes (hence the smp_wmb()). */
static irqreturn_t smsc9420_isr(int irq, void *dev_id)
{
	struct smsc9420_pdata *pd = dev_id;
	u32 int_cfg, int_sts, int_ctl;
	irqreturn_t ret = IRQ_NONE;
	ulong flags;

	BUG_ON(!pd);
	BUG_ON(!pd->ioaddr);

	int_cfg = smsc9420_reg_read(pd, INT_CFG);

	/* check if it's our interrupt */
	if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) !=
	    (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_))
		return IRQ_NONE;

	int_sts = smsc9420_reg_read(pd, INT_STAT);

	if (likely(INT_STAT_DMAC_INT_ & int_sts)) {
		u32 status = smsc9420_reg_read(pd, DMAC_STATUS);
		u32 ints_to_clear = 0;

		if (status & DMAC_STS_TX_) {
			ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_);
			netif_wake_queue(pd->dev);
		}

		if (status & DMAC_STS_RX_) {
			/* mask RX DMAC interrupts */
			u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
			dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
			smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
			smsc9420_pci_flush_write(pd);

			ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
			napi_schedule(&pd->napi);
		}

		if (ints_to_clear)
			smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear);

		ret = IRQ_HANDLED;
	}

	if (unlikely(INT_STAT_SW_INT_ & int_sts)) {
		/* mask software interrupt */
		spin_lock_irqsave(&pd->int_lock, flags);
		int_ctl = smsc9420_reg_read(pd, INT_CTL);
		int_ctl &= (~INT_CTL_SW_INT_EN_);
		smsc9420_reg_write(pd, INT_CTL, int_ctl);
		spin_unlock_irqrestore(&pd->int_lock, flags);

		smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_);
		/* publish the flag to the thread waiting for it */
		pd->software_irq_signal = true;
		smp_wmb();

		ret = IRQ_HANDLED;
	}

	/* to ensure PCI write completion, we must perform a PCI read */
	smsc9420_pci_flush_write(pd);

	return ret;
}
 673
 674#ifdef CONFIG_NET_POLL_CONTROLLER
 675static void smsc9420_poll_controller(struct net_device *dev)
 676{
 677	struct smsc9420_pdata *pd = netdev_priv(dev);
 678	const int irq = pd->pdev->irq;
 679
 680	disable_irq(irq);
 681	smsc9420_isr(0, dev);
 682	enable_irq(irq);
 683}
 684#endif /* CONFIG_NET_POLL_CONTROLLER */
 685
/* Soft-reset the DMA engine via BUS_MODE and verify the self-clearing
 * reset bit has cleared; only warns on failure. */
static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd)
{
	smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_);
	/* read back to flush the write, then give the reset time to latch */
	smsc9420_reg_read(pd, BUS_MODE);
	udelay(2);
	if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
		netif_warn(pd, drv, pd->dev, "Software reset not cleared\n");
}
 694
/* ndo_stop: bring the interface down.  Masks the master interrupt,
 * quiesces NAPI and both DMA directions, frees the rings and the IRQ,
 * resets the DMA engine, and finally tears down the PHY and MII bus
 * (which smsc9420_open presumably creates — the registration side is
 * outside this chunk). */
static int smsc9420_stop(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 int_cfg;
	ulong flags;

	BUG_ON(!pd);
	BUG_ON(!dev->phydev);

	/* disable master interrupt */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	netif_tx_disable(dev);
	napi_disable(&pd->napi);

	smsc9420_stop_tx(pd);
	smsc9420_free_tx_ring(pd);

	smsc9420_stop_rx(pd);
	smsc9420_free_rx_ring(pd);

	free_irq(pd->pdev->irq, pd);

	smsc9420_dmac_soft_reset(pd);

	phy_stop(dev->phydev);

	phy_disconnect(dev->phydev);
	mdiobus_unregister(pd->mii_bus);
	mdiobus_free(pd->mii_bus);

	return 0;
}
 731
 732static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status)
 733{
 734	if (unlikely(desc_status & RDES0_ERROR_SUMMARY_)) {
 735		dev->stats.rx_errors++;
 736		if (desc_status & RDES0_DESCRIPTOR_ERROR_)
 737			dev->stats.rx_over_errors++;
 738		else if (desc_status & (RDES0_FRAME_TOO_LONG_ |
 739			RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_))
 740			dev->stats.rx_frame_errors++;
 741		else if (desc_status & RDES0_CRC_ERROR_)
 742			dev->stats.rx_crc_errors++;
 743	}
 744
 745	if (unlikely(desc_status & RDES0_LENGTH_ERROR_))
 746		dev->stats.rx_length_errors++;
 747
 748	if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) &&
 749		(desc_status & RDES0_FIRST_DESCRIPTOR_))))
 750		dev->stats.rx_length_errors++;
 751
 752	if (desc_status & RDES0_MULTICAST_FRAME_)
 753		dev->stats.multicast++;
 754}
 755
/* Detach the received skb at ring slot @index from the DMA ring and
 * hand it up the network stack. */
static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
				const u32 status)
{
	struct net_device *dev = pd->dev;
	struct sk_buff *skb;
	u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_)
		>> RDES0_FRAME_LENGTH_SHFT_;

	/* remove crc from packet length */
	packet_length -= 4;

	/* with RX checksum offload the hardware appends a 2-byte checksum
	 * trailer, which is not part of the packet either */
	if (pd->rx_csum)
		packet_length -= 2;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += packet_length;

	pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
		PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
	pd->rx_buffers[index].mapping = 0;

	skb = pd->rx_buffers[index].skb;
	pd->rx_buffers[index].skb = NULL;

	if (pd->rx_csum) {
		/* pull the hardware checksum out of the trailer that
		 * follows the packet data */
		u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) +
			NET_IP_ALIGN + packet_length + 4);
		put_unaligned_le16(hw_csum, &skb->csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	/* skip the alignment padding the buffer was set up with */
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put(skb, packet_length);

	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
}
 794
/* Allocate and DMA-map a fresh receive skb for ring slot @index, then
 * hand the descriptor back to the DMA engine (RDES0_OWN_).
 * Returns 0 on success, -ENOMEM on allocation or mapping failure. */
static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
{
	struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ);
	dma_addr_t mapping;

	BUG_ON(pd->rx_buffers[index].skb);
	BUG_ON(pd->rx_buffers[index].mapping);

	if (unlikely(!skb))
		return -ENOMEM;

	mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pd->pdev, mapping)) {
		dev_kfree_skb_any(skb);
		netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
		return -ENOMEM;
	}

	pd->rx_buffers[index].skb = skb;
	pd->rx_buffers[index].mapping = mapping;
	/* buffer starts NET_IP_ALIGN past the mapping so the IP header
	 * lands aligned after eth_type_trans() */
	pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN;
	/* NOTE(review): OWN is written before the wmb(), so buffer1 is not
	 * guaranteed visible before ownership transfers — looks like the
	 * barrier should sit between the two stores; confirm against the
	 * DMA-API ordering rules before changing */
	pd->rx_ring[index].status = RDES0_OWN_;
	wmb();

	return 0;
}
 822
 823static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd)
 824{
 825	while (pd->rx_ring_tail != pd->rx_ring_head) {
 826		if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail))
 827			break;
 828
 829		pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE;
 830	}
 831}
 832
/* NAPI poll: drain up to @budget completed RX descriptors, refill the
 * ring, account dropped frames, kick the RX DMA engine and — when work
 * finished under budget — complete NAPI and unmask RX interrupts. */
static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
{
	struct smsc9420_pdata *pd =
		container_of(napi, struct smsc9420_pdata, napi);
	struct net_device *dev = pd->dev;
	u32 drop_frame_cnt, dma_intr_ena, status;
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		/* order the status read against the DMA engine's writes */
		rmb();
		status = pd->rx_ring[pd->rx_ring_head].status;

		/* stop if DMAC owns this dma descriptor */
		if (status & RDES0_OWN_)
			break;

		smsc9420_rx_count_stats(dev, status);
		smsc9420_rx_handoff(pd, pd->rx_ring_head, status);
		pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE;
		smsc9420_alloc_new_rx_buffers(pd);
	}

	/* fold the missed-frame counter's two fields into rx_dropped */
	drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
	dev->stats.rx_dropped +=
	    (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF);

	/* Kick RXDMA */
	smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);
	smsc9420_pci_flush_write(pd);

	if (work_done < budget) {
		napi_complete_done(&pd->napi, work_done);

		/* re-enable RX DMA interrupts */
		dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
		dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
		smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
		smsc9420_pci_flush_write(pd);
	}
	return work_done;
}
 874
 875static void
 876smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length)
 877{
 878	if (unlikely(status & TDES0_ERROR_SUMMARY_)) {
 879		dev->stats.tx_errors++;
 880		if (status & (TDES0_EXCESSIVE_DEFERRAL_ |
 881			TDES0_EXCESSIVE_COLLISIONS_))
 882			dev->stats.tx_aborted_errors++;
 883
 884		if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_))
 885			dev->stats.tx_carrier_errors++;
 886	} else {
 887		dev->stats.tx_packets++;
 888		dev->stats.tx_bytes += (length & 0x7FF);
 889	}
 890
 891	if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) {
 892		dev->stats.collisions += 16;
 893	} else {
 894		dev->stats.collisions +=
 895			(status & TDES0_COLLISION_COUNT_MASK_) >>
 896			TDES0_COLLISION_COUNT_SHFT_;
 897	}
 898
 899	if (unlikely(status & TDES0_HEARTBEAT_FAIL_))
 900		dev->stats.tx_heartbeat_errors++;
 901}
 902
 903/* Check for completed dma transfers, update stats and free skbs */
/* Check for completed dma transfers, update stats and free skbs */
static void smsc9420_complete_tx(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	while (pd->tx_ring_tail != pd->tx_ring_head) {
		int index = pd->tx_ring_tail;
		u32 status, length;

		/* order the descriptor reads against the DMA engine's writes */
		rmb();
		status = pd->tx_ring[index].status;
		length = pd->tx_ring[index].length;

		/* Check if DMA still owns this descriptor */
		if (unlikely(TDES0_OWN_ & status))
			break;

		smsc9420_tx_update_stats(dev, status, length);

		BUG_ON(!pd->tx_buffers[index].skb);
		BUG_ON(!pd->tx_buffers[index].mapping);

		pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
			pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
		pd->tx_buffers[index].mapping = 0;

		dev_kfree_skb_any(pd->tx_buffers[index].skb);
		pd->tx_buffers[index].skb = NULL;

		pd->tx_ring[index].buffer1 = 0;
		wmb();

		pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE;
	}
}
 938
 939static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
 940					    struct net_device *dev)
 941{
 942	struct smsc9420_pdata *pd = netdev_priv(dev);
 943	dma_addr_t mapping;
 944	int index = pd->tx_ring_head;
 945	u32 tmp_desc1;
 946	bool about_to_take_last_desc =
 947		(((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail);
 948
 949	smsc9420_complete_tx(dev);
 950
 951	rmb();
 952	BUG_ON(pd->tx_ring[index].status & TDES0_OWN_);
 953	BUG_ON(pd->tx_buffers[index].skb);
 954	BUG_ON(pd->tx_buffers[index].mapping);
 955
 956	mapping = pci_map_single(pd->pdev, skb->data,
 957				 skb->len, PCI_DMA_TODEVICE);
 958	if (pci_dma_mapping_error(pd->pdev, mapping)) {
 959		netif_warn(pd, tx_err, pd->dev,
 960			   "pci_map_single failed, dropping packet\n");
 961		return NETDEV_TX_BUSY;
 962	}
 963
 964	pd->tx_buffers[index].skb = skb;
 965	pd->tx_buffers[index].mapping = mapping;
 966
 967	tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF));
 968	if (unlikely(about_to_take_last_desc)) {
 969		tmp_desc1 |= TDES1_IC_;
 970		netif_stop_queue(pd->dev);
 971	}
 972
 973	/* check if we are at the last descriptor and need to set EOR */
 974	if (unlikely(index == (TX_RING_SIZE - 1)))
 975		tmp_desc1 |= TDES1_TER_;
 976
 977	pd->tx_ring[index].buffer1 = mapping;
 978	pd->tx_ring[index].length = tmp_desc1;
 979	wmb();
 980
 981	/* increment head */
 982	pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE;
 983
 984	/* assign ownership to DMAC */
 985	pd->tx_ring[index].status = TDES0_OWN_;
 986	wmb();
 987
 988	skb_tx_timestamp(skb);
 989
 990	/* kick the DMA */
 991	smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
 992	smsc9420_pci_flush_write(pd);
 993
 994	return NETDEV_TX_OK;
 995}
 996
 997static struct net_device_stats *smsc9420_get_stats(struct net_device *dev)
 998{
 999	struct smsc9420_pdata *pd = netdev_priv(dev);
1000	u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
1001	dev->stats.rx_dropped +=
1002	    (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF);
1003	return &dev->stats;
1004}
1005
/* ndo_set_rx_mode: program MAC_CR and the hash filter registers to match
 * the interface flags — promiscuous, all-multicast, hash-filtered
 * multicast, or unicast-only. */
static void smsc9420_set_multicast_list(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);

	if (dev->flags & IFF_PROMISC) {
		netif_dbg(pd, hw, pd->dev, "Promiscuous Mode Enabled\n");
		mac_cr |= MAC_CR_PRMS_;
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (dev->flags & IFF_ALLMULTI) {
		netif_dbg(pd, hw, pd->dev, "Receive all Multicast Enabled\n");
		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr |= MAC_CR_MCPAS_;
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		u32 hash_lo = 0, hash_hi = 0;

		netif_dbg(pd, hw, pd->dev, "Multicast filter enabled\n");
		/* build the 64-bit hash table: bit 5 of the hash selects
		 * the high/low register, bits 0-4 the bit within it */
		netdev_for_each_mc_addr(ha, dev) {
			u32 bit_num = smsc9420_hash(ha->addr);
			u32 mask = 1 << (bit_num & 0x1F);

			if (bit_num & 0x20)
				hash_hi |= mask;
			else
				hash_lo |= mask;

		}
		smsc9420_reg_write(pd, HASHH, hash_hi);
		smsc9420_reg_write(pd, HASHL, hash_lo);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr |= MAC_CR_HPFILT_;
	} else {
		netif_dbg(pd, hw, pd->dev, "Receive own packets only\n");
		/* no multicast addresses: clear the hash table entirely */
		smsc9420_reg_write(pd, HASHH, 0);
		smsc9420_reg_write(pd, HASHL, 0);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	}

	smsc9420_reg_write(pd, MAC_CR, mac_cr);
	smsc9420_pci_flush_write(pd);
}
1055
/* Program the FLOW register according to the flow-control capabilities
 * resolved from our advertisement and the link partner's; flow control
 * is only meaningful in full duplex. */
static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
{
	struct net_device *dev = pd->dev;
	struct phy_device *phy_dev = dev->phydev;
	u32 flow;

	if (phy_dev->duplex == DUPLEX_FULL) {
		u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
		u16 rmtadv = phy_read(phy_dev, MII_LPA);
		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

		/* 0xFFFF0002 presumably sets max pause time plus the
		 * flow-control enable bit — confirm against the FLOW
		 * register layout in the datasheet */
		if (cap & FLOW_CTRL_RX)
			flow = 0xFFFF0002;
		else
			flow = 0;

		netif_info(pd, link, pd->dev, "rx pause %s, tx pause %s\n",
			   cap & FLOW_CTRL_RX ? "enabled" : "disabled",
			   cap & FLOW_CTRL_TX ? "enabled" : "disabled");
	} else {
		netif_info(pd, link, pd->dev, "half duplex\n");
		flow = 0;
	}

	smsc9420_reg_write(pd, FLOW, flow);
}
1082
1083/* Update link mode if anything has changed.  Called periodically when the
1084 * PHY is in polling mode, even if nothing has changed. */
1085static void smsc9420_phy_adjust_link(struct net_device *dev)
1086{
1087	struct smsc9420_pdata *pd = netdev_priv(dev);
1088	struct phy_device *phy_dev = dev->phydev;
1089	int carrier;
1090
1091	if (phy_dev->duplex != pd->last_duplex) {
1092		u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
1093		if (phy_dev->duplex) {
1094			netif_dbg(pd, link, pd->dev, "full duplex mode\n");
1095			mac_cr |= MAC_CR_FDPX_;
1096		} else {
1097			netif_dbg(pd, link, pd->dev, "half duplex mode\n");
1098			mac_cr &= ~MAC_CR_FDPX_;
1099		}
1100		smsc9420_reg_write(pd, MAC_CR, mac_cr);
1101
1102		smsc9420_phy_update_flowcontrol(pd);
1103		pd->last_duplex = phy_dev->duplex;
1104	}
1105
1106	carrier = netif_carrier_ok(dev);
1107	if (carrier != pd->last_carrier) {
1108		if (carrier)
1109			netif_dbg(pd, link, pd->dev, "carrier OK\n");
1110		else
1111			netif_dbg(pd, link, pd->dev, "no carrier\n");
1112		pd->last_carrier = carrier;
1113	}
1114}
1115
1116static int smsc9420_mii_probe(struct net_device *dev)
1117{
1118	struct smsc9420_pdata *pd = netdev_priv(dev);
1119	struct phy_device *phydev = NULL;
1120
1121	BUG_ON(dev->phydev);
1122
1123	/* Device only supports internal PHY at address 1 */
1124	phydev = mdiobus_get_phy(pd->mii_bus, 1);
1125	if (!phydev) {
1126		netdev_err(dev, "no PHY found at address 1\n");
1127		return -ENODEV;
1128	}
1129
1130	phydev = phy_connect(dev, phydev_name(phydev),
1131			     smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
1132
1133	if (IS_ERR(phydev)) {
1134		netdev_err(dev, "Could not attach to PHY\n");
1135		return PTR_ERR(phydev);
1136	}
1137
1138	/* mask with MAC supported features */
1139	phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
1140			      SUPPORTED_Asym_Pause);
1141	phydev->advertising = phydev->supported;
1142
1143	phy_attached_info(phydev);
1144
1145	pd->last_duplex = -1;
1146	pd->last_carrier = -1;
1147
1148	return 0;
1149}
1150
1151static int smsc9420_mii_init(struct net_device *dev)
1152{
1153	struct smsc9420_pdata *pd = netdev_priv(dev);
1154	int err = -ENXIO;
1155
1156	pd->mii_bus = mdiobus_alloc();
1157	if (!pd->mii_bus) {
1158		err = -ENOMEM;
1159		goto err_out_1;
1160	}
1161	pd->mii_bus->name = DRV_MDIONAME;
1162	snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
1163		(pd->pdev->bus->number << 8) | pd->pdev->devfn);
1164	pd->mii_bus->priv = pd;
1165	pd->mii_bus->read = smsc9420_mii_read;
1166	pd->mii_bus->write = smsc9420_mii_write;
1167
1168	/* Mask all PHYs except ID 1 (internal) */
1169	pd->mii_bus->phy_mask = ~(1 << 1);
1170
1171	if (mdiobus_register(pd->mii_bus)) {
1172		netif_warn(pd, probe, pd->dev, "Error registering mii bus\n");
1173		goto err_out_free_bus_2;
1174	}
1175
1176	if (smsc9420_mii_probe(dev) < 0) {
1177		netif_warn(pd, probe, pd->dev, "Error probing mii bus\n");
1178		goto err_out_unregister_bus_3;
1179	}
1180
1181	return 0;
1182
1183err_out_unregister_bus_3:
1184	mdiobus_unregister(pd->mii_bus);
1185err_out_free_bus_2:
1186	mdiobus_free(pd->mii_bus);
1187err_out_1:
1188	return err;
1189}
1190
1191static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
1192{
1193	int i;
1194
1195	BUG_ON(!pd->tx_ring);
1196
1197	pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
1198				       sizeof(struct smsc9420_ring_info),
1199				       GFP_KERNEL);
1200	if (!pd->tx_buffers)
1201		return -ENOMEM;
1202
1203	/* Initialize the TX Ring */
1204	for (i = 0; i < TX_RING_SIZE; i++) {
1205		pd->tx_buffers[i].skb = NULL;
1206		pd->tx_buffers[i].mapping = 0;
1207		pd->tx_ring[i].status = 0;
1208		pd->tx_ring[i].length = 0;
1209		pd->tx_ring[i].buffer1 = 0;
1210		pd->tx_ring[i].buffer2 = 0;
1211	}
1212	pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
1213	wmb();
1214
1215	pd->tx_ring_head = 0;
1216	pd->tx_ring_tail = 0;
1217
1218	smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr);
1219	smsc9420_pci_flush_write(pd);
1220
1221	return 0;
1222}
1223
1224static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
1225{
1226	int i;
1227
1228	BUG_ON(!pd->rx_ring);
1229
1230	pd->rx_buffers = kmalloc_array(RX_RING_SIZE,
1231				       sizeof(struct smsc9420_ring_info),
1232				       GFP_KERNEL);
1233	if (pd->rx_buffers == NULL)
1234		goto out;
1235
1236	/* initialize the rx ring */
1237	for (i = 0; i < RX_RING_SIZE; i++) {
1238		pd->rx_ring[i].status = 0;
1239		pd->rx_ring[i].length = PKT_BUF_SZ;
1240		pd->rx_ring[i].buffer2 = 0;
1241		pd->rx_buffers[i].skb = NULL;
1242		pd->rx_buffers[i].mapping = 0;
1243	}
1244	pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_);
1245
1246	/* now allocate the entire ring of skbs */
1247	for (i = 0; i < RX_RING_SIZE; i++) {
1248		if (smsc9420_alloc_rx_buffer(pd, i)) {
1249			netif_warn(pd, ifup, pd->dev,
1250				   "failed to allocate rx skb %d\n", i);
1251			goto out_free_rx_skbs;
1252		}
1253	}
1254
1255	pd->rx_ring_head = 0;
1256	pd->rx_ring_tail = 0;
1257
1258	smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
1259	netif_dbg(pd, ifup, pd->dev, "VLAN1 = 0x%08x\n",
1260		  smsc9420_reg_read(pd, VLAN1));
1261
1262	if (pd->rx_csum) {
1263		/* Enable RX COE */
1264		u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
1265		smsc9420_reg_write(pd, COE_CR, coe);
1266		netif_dbg(pd, ifup, pd->dev, "COE_CR = 0x%08x\n", coe);
1267	}
1268
1269	smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
1270	smsc9420_pci_flush_write(pd);
1271
1272	return 0;
1273
1274out_free_rx_skbs:
1275	smsc9420_free_rx_ring(pd);
1276out:
1277	return -ENOMEM;
1278}
1279
/* ndo_open: bring the interface fully up.
 *
 * Sequence (order matters): validate the MAC address, quiesce and
 * acknowledge all interrupt sources, grab the shared IRQ, soft-reset the
 * DMA engine, program MAC/BUS/DMAC configuration, verify the IRQ line
 * really reaches our ISR via a software-interrupt test, allocate both
 * DMA rings, attach the PHY, then enable RX/TX and interrupts.
 * Returns 0 on success or a negative errno. */
static int smsc9420_open(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
	const int irq = pd->pdev->irq;
	unsigned long flags;
	int result = 0, timeout;

	if (!is_valid_ether_addr(dev->dev_addr)) {
		netif_warn(pd, ifup, pd->dev,
			   "dev_addr is not a valid MAC address\n");
		result = -EADDRNOTAVAIL;
		goto out_0;
	}

	netif_carrier_off(dev);

	/* disable, mask and acknowledge all interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	smsc9420_reg_write(pd, INT_CTL, 0);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, 0);
	/* acknowledge (clear) every pending status bit */
	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
	smsc9420_pci_flush_write(pd);

	result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev, "Unable to use IRQ = %d\n", irq);
		result = -ENODEV;
		goto out_0;
	}

	smsc9420_dmac_soft_reset(pd);

	/* make sure MAC_CR is sane */
	smsc9420_reg_write(pd, MAC_CR, 0);

	smsc9420_set_mac_address(dev);

	/* Configure GPIO pins to drive LEDs */
	smsc9420_reg_write(pd, GPIO_CFG,
		(GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_));

	bus_mode = BUS_MODE_DMA_BURST_LENGTH_16;

#ifdef __BIG_ENDIAN
	/* descriptor byte ordering for big-endian hosts */
	bus_mode |= BUS_MODE_DBO_;
#endif

	smsc9420_reg_write(pd, BUS_MODE, bus_mode);

	smsc9420_pci_flush_write(pd);

	/* set bus master bridge arbitration priority for Rx and TX DMA */
	smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1);

	smsc9420_reg_write(pd, DMAC_CONTROL,
		(DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_));

	smsc9420_pci_flush_write(pd);

	/* test the IRQ connection to the ISR */
	netif_dbg(pd, ifup, pd->dev, "Testing ISR using IRQ %d\n", irq);
	pd->software_irq_signal = false;

	spin_lock_irqsave(&pd->int_lock, flags);
	/* configure interrupt deassertion timer and enable interrupts */
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	int_cfg &= ~(INT_CFG_INT_DEAS_MASK);
	int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);

	/* unmask software interrupt */
	int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_;
	smsc9420_reg_write(pd, INT_CTL, int_ctl);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_pci_flush_write(pd);

	/* the ISR is expected to set software_irq_signal; poll up to ~1s */
	timeout = 1000;
	while (timeout--) {
		if (pd->software_irq_signal)
			break;
		msleep(1);
	}

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (!pd->software_irq_signal) {
		netif_warn(pd, ifup, pd->dev, "ISR failed signaling test\n");
		result = -ENODEV;
		goto out_free_irq_1;
	}

	netif_dbg(pd, ifup, pd->dev, "ISR passed test using IRQ %d\n", irq);

	result = smsc9420_alloc_tx_ring(pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev,
			   "Failed to Initialize tx dma ring\n");
		result = -ENOMEM;
		goto out_free_irq_1;
	}

	result = smsc9420_alloc_rx_ring(pd);
	if (result) {
		netif_warn(pd, ifup, pd->dev,
			   "Failed to Initialize rx dma ring\n");
		result = -ENOMEM;
		goto out_free_tx_ring_2;
	}

	result = smsc9420_mii_init(dev);
	if (result) {
		netif_warn(pd, ifup, pd->dev, "Failed to initialize Phy\n");
		result = -ENODEV;
		goto out_free_rx_ring_3;
	}

	/* Bring the PHY up */
	phy_start(dev->phydev);

	napi_enable(&pd->napi);

	/* start tx and rx */
	mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_;
	smsc9420_reg_write(pd, MAC_CR, mac_cr);

	dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
	dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_;
	smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
	smsc9420_pci_flush_write(pd);

	dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
	dma_intr_ena |=
		(DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
	smsc9420_pci_flush_write(pd);

	netif_wake_queue(dev);

	/* kick the RX DMA engine to start polling descriptors */
	smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);

	/* enable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	return 0;

out_free_rx_ring_3:
	smsc9420_free_rx_ring(pd);
out_free_tx_ring_2:
	smsc9420_free_tx_ring(pd);
out_free_irq_1:
	free_irq(irq, pd);
out_0:
	return result;
}
1445
1446#ifdef CONFIG_PM
1447
/* Legacy PCI suspend hook: mask chip interrupts, tear down a running
 * interface (rings and the IRQ are re-acquired on resume through
 * smsc9420_open), then put the PCI device to sleep with wake disabled. */
static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 int_cfg;
	ulong flags;

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (netif_running(dev)) {
		/* stop TX first so no new DMA is queued, then RX/NAPI */
		netif_tx_disable(dev);
		smsc9420_stop_tx(pd);
		smsc9420_free_tx_ring(pd);

		napi_disable(&pd->napi);
		smsc9420_stop_rx(pd);
		smsc9420_free_rx_ring(pd);

		free_irq(pd->pdev->irq, pd);

		netif_device_detach(dev);
	}

	/* standard PCI power-down: save config space, no wake source,
	 * then enter the target sleep state */
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1482
/* Legacy PCI resume hook: restore PCI state and, if the interface was
 * up, rebuild it completely via smsc9420_open().
 * NOTE(review): when the interface was not running, the return value is
 * whatever pci_enable_wake() returned — possibly non-zero even though
 * resume effectively succeeded; confirm whether that is intended. */
static int smsc9420_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	/* failing to disable wake is non-fatal; just warn */
	err = pci_enable_wake(pdev, PCI_D0, 0);
	if (err)
		netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
			   err);

	if (netif_running(dev)) {
		/* FIXME: gross. It looks like ancient PM relic.*/
		err = smsc9420_open(dev);
		netif_device_attach(dev);
	}
	return err;
}
1510
1511#endif /* CONFIG_PM */
1512
/* Network device callbacks; the MAC address is handled by the generic
 * eth_mac_addr helper. */
static const struct net_device_ops smsc9420_netdev_ops = {
	.ndo_open		= smsc9420_open,
	.ndo_stop		= smsc9420_stop,
	.ndo_start_xmit		= smsc9420_hard_start_xmit,
	.ndo_get_stats		= smsc9420_get_stats,
	.ndo_set_rx_mode	= smsc9420_set_multicast_list,
	.ndo_do_ioctl		= smsc9420_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address 	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smsc9420_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
1526
1527static int
1528smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1529{
1530	struct net_device *dev;
1531	struct smsc9420_pdata *pd;
1532	void __iomem *virt_addr;
1533	int result = 0;
1534	u32 id_rev;
1535
1536	pr_info("%s version %s\n", DRV_DESCRIPTION, DRV_VERSION);
1537
1538	/* First do the PCI initialisation */
1539	result = pci_enable_device(pdev);
1540	if (unlikely(result)) {
1541		pr_err("Cannot enable smsc9420\n");
1542		goto out_0;
1543	}
1544
1545	pci_set_master(pdev);
1546
1547	dev = alloc_etherdev(sizeof(*pd));
1548	if (!dev)
1549		goto out_disable_pci_device_1;
1550
1551	SET_NETDEV_DEV(dev, &pdev->dev);
1552
1553	if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
1554		netdev_err(dev, "Cannot find PCI device base address\n");
1555		goto out_free_netdev_2;
1556	}
1557
1558	if ((pci_request_regions(pdev, DRV_NAME))) {
1559		netdev_err(dev, "Cannot obtain PCI resources, aborting\n");
1560		goto out_free_netdev_2;
1561	}
1562
1563	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1564		netdev_err(dev, "No usable DMA configuration, aborting\n");
1565		goto out_free_regions_3;
1566	}
1567
1568	virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
1569		pci_resource_len(pdev, SMSC_BAR));
1570	if (!virt_addr) {
1571		netdev_err(dev, "Cannot map device registers, aborting\n");
1572		goto out_free_regions_3;
1573	}
1574
1575	/* registers are double mapped with 0 offset for LE and 0x200 for BE */
1576	virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
1577
1578	pd = netdev_priv(dev);
1579
1580	/* pci descriptors are created in the PCI consistent area */
1581	pd->rx_ring = pci_alloc_consistent(pdev,
1582		sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
1583		sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
1584		&pd->rx_dma_addr);
1585
1586	if (!pd->rx_ring)
1587		goto out_free_io_4;
1588
1589	/* descriptors are aligned due to the nature of pci_alloc_consistent */
1590	pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
1591	pd->tx_dma_addr = pd->rx_dma_addr +
1592	    sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
1593
1594	pd->pdev = pdev;
1595	pd->dev = dev;
1596	pd->ioaddr = virt_addr;
1597	pd->msg_enable = smsc_debug;
1598	pd->rx_csum = true;
1599
1600	netif_dbg(pd, probe, pd->dev, "lan_base=0x%08lx\n", (ulong)virt_addr);
1601
1602	id_rev = smsc9420_reg_read(pd, ID_REV);
1603	switch (id_rev & 0xFFFF0000) {
1604	case 0x94200000:
1605		netif_info(pd, probe, pd->dev,
1606			   "LAN9420 identified, ID_REV=0x%08X\n", id_rev);
1607		break;
1608	default:
1609		netif_warn(pd, probe, pd->dev, "LAN9420 NOT identified\n");
1610		netif_warn(pd, probe, pd->dev, "ID_REV=0x%08X\n", id_rev);
1611		goto out_free_dmadesc_5;
1612	}
1613
1614	smsc9420_dmac_soft_reset(pd);
1615	smsc9420_eeprom_reload(pd);
1616	smsc9420_check_mac_address(dev);
1617
1618	dev->netdev_ops = &smsc9420_netdev_ops;
1619	dev->ethtool_ops = &smsc9420_ethtool_ops;
1620
1621	netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
1622
1623	result = register_netdev(dev);
1624	if (result) {
1625		netif_warn(pd, probe, pd->dev, "error %i registering device\n",
1626			   result);
1627		goto out_free_dmadesc_5;
1628	}
1629
1630	pci_set_drvdata(pdev, dev);
1631
1632	spin_lock_init(&pd->int_lock);
1633	spin_lock_init(&pd->phy_lock);
1634
1635	dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);
1636
1637	return 0;
1638
1639out_free_dmadesc_5:
1640	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
1641		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
1642out_free_io_4:
1643	iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
1644out_free_regions_3:
1645	pci_release_regions(pdev);
1646out_free_netdev_2:
1647	free_netdev(dev);
1648out_disable_pci_device_1:
1649	pci_disable_device(pdev);
1650out_0:
1651	return -ENODEV;
1652}
1653
/* Undo smsc9420_probe in reverse order: unregister the netdev, release
 * the shared descriptor area, unmap registers and disable the device. */
static void smsc9420_remove(struct pci_dev *pdev)
{
	struct net_device *dev;
	struct smsc9420_pdata *pd;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	pd = netdev_priv(dev);
	unregister_netdev(dev);

	/* tx_buffers and rx_buffers are freed in stop */
	BUG_ON(pd->tx_buffers);
	BUG_ON(pd->rx_buffers);

	BUG_ON(!pd->tx_ring);
	BUG_ON(!pd->rx_ring);

	/* rx_ring/tx_ring share one consistent allocation (see probe) */
	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);

	/* ioaddr was offset by the endian-mapping bias at probe time */
	iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
1681
/* PCI glue: probe/remove plus legacy suspend/resume hooks under CONFIG_PM. */
static struct pci_driver smsc9420_driver = {
	.name = DRV_NAME,
	.id_table = smsc9420_id_table,
	.probe = smsc9420_probe,
	.remove = smsc9420_remove,
#ifdef CONFIG_PM
	.suspend = smsc9420_suspend,
	.resume = smsc9420_resume,
#endif /* CONFIG_PM */
};
1692
/* Module entry: resolve the "debug" parameter into a netif message-level
 * mask, then register the PCI driver. */
static int __init smsc9420_init_module(void)
{
	smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT);

	return pci_register_driver(&smsc9420_driver);
}
1699
/* Module exit: unregister the PCI driver (triggers remove for bound devices). */
static void __exit smsc9420_exit_module(void)
{
	pci_unregister_driver(&smsc9420_driver);
}
1704
/* Register module entry/exit points. */
module_init(smsc9420_init_module);
module_exit(smsc9420_exit_module);