   1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
   2 *
   3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
   4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
   5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
   6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
   7 * Copyright (C) 2006 Broadcom Corporation.
   8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
   9 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
  10 *
  11 * Distribute under GPL.
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/moduleparam.h>
  19#include <linux/types.h>
  20#include <linux/netdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/mii.h>
  23#include <linux/if_ether.h>
  24#include <linux/if_vlan.h>
  25#include <linux/etherdevice.h>
  26#include <linux/pci.h>
  27#include <linux/delay.h>
  28#include <linux/init.h>
  29#include <linux/interrupt.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/ssb/ssb.h>
  32#include <linux/slab.h>
  33#include <linux/phy.h>
  34
  35#include <asm/uaccess.h>
  36#include <asm/io.h>
  37#include <asm/irq.h>
  38
  39
  40#include "b44.h"
  41
  42#define DRV_MODULE_NAME		"b44"
  43#define DRV_MODULE_VERSION	"2.0"
  44#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
  45
  46#define B44_DEF_MSG_ENABLE	  \
  47	(NETIF_MSG_DRV		| \
  48	 NETIF_MSG_PROBE	| \
  49	 NETIF_MSG_LINK		| \
  50	 NETIF_MSG_TIMER	| \
  51	 NETIF_MSG_IFDOWN	| \
  52	 NETIF_MSG_IFUP		| \
  53	 NETIF_MSG_RX_ERR	| \
  54	 NETIF_MSG_TX_ERR)
  55
  56/* length of time before we decide the hardware is borked,
  57 * and dev->tx_timeout() should be called to fix the problem
  58 */
  59#define B44_TX_TIMEOUT			(5 * HZ)
  60
  61/* hardware minimum and maximum for a single frame's data payload */
  62#define B44_MIN_MTU			60
  63#define B44_MAX_MTU			1500
  64
  65#define B44_RX_RING_SIZE		512
  66#define B44_DEF_RX_RING_PENDING		200
  67#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
  68				 B44_RX_RING_SIZE)
  69#define B44_TX_RING_SIZE		512
  70#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
  71#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
  72				 B44_TX_RING_SIZE)
  73
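/* TX ring bookkeeping: TX_RING_GAP() is the part of the ring deliberately
 * left unused (ring size minus tx_pending), TX_BUFFS_AVAIL() is how many
 * descriptors the driver may still queue, and NEXT_TX() advances an index
 * with wrap-around (the ring size is a power of two).
 */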
  74#define TX_RING_GAP(BP)	\
  75	(B44_TX_RING_SIZE - (BP)->tx_pending)
  76#define TX_BUFFS_AVAIL(BP)						\
  77	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
  78	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
  79	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
  80#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
  81
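/* Each RX buffer reserves room in front of the frame for the rx_header the
 * chip writes plus two bytes of padding (RX_PKT_OFFSET), followed by up to
 * 1536 bytes of packet data; see the comment above b44_alloc_rx_skb().
 */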
  82#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
  83#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
  84
  85/* minimum number of free TX descriptors required to wake up TX process */
  86#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
  87
  88/* b44 internal pattern match filter info */
  89#define B44_PATTERN_BASE	0x400
  90#define B44_PATTERN_SIZE	0x80
  91#define B44_PMASK_BASE		0x600
  92#define B44_PMASK_SIZE		0x10
  93#define B44_MAX_PATTERNS	16
  94#define B44_ETHIPV6UDP_HLEN	62
  95#define B44_ETHIPV4UDP_HLEN	42
  96
  97MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
  98MODULE_DESCRIPTION(DRV_DESCRIPTION);
  99MODULE_LICENSE("GPL");
 100MODULE_VERSION(DRV_MODULE_VERSION);
 101
 102static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
 103module_param(b44_debug, int, 0);
 104MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
 105
 106
 107#ifdef CONFIG_B44_PCI
 108static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
 109	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
 110	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
 111	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
 112	{ 0 } /* terminate list with empty entry */
 113};
 114MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
 115
 116static struct pci_driver b44_pci_driver = {
 117	.name		= DRV_MODULE_NAME,
 118	.id_table	= b44_pci_tbl,
 119};
 120#endif /* CONFIG_B44_PCI */
 121
 122static const struct ssb_device_id b44_ssb_tbl[] = {
 123	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
 124	SSB_DEVTABLE_END
 125};
 126MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
 127
 128static void b44_halt(struct b44 *);
 129static void b44_init_rings(struct b44 *);
 130
 131#define B44_FULL_RESET		1
 132#define B44_FULL_RESET_SKIP_PHY	2
 133#define B44_PARTIAL_RESET	3
 134#define B44_CHIP_RESET_FULL	4
 135#define B44_CHIP_RESET_PARTIAL	5
 136
 137static void b44_init_hw(struct b44 *, int);
 138
 139static int dma_desc_sync_size;
 140static int instance;
 141
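/* ethtool statistics names: stringify every counter declared via
 * B44_STAT_REG_DECLARE (used by b44_get_strings/b44_get_ethtool_stats below).
 */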
 142static const char b44_gstrings[][ETH_GSTRING_LEN] = {
 143#define _B44(x...)	# x,
 144B44_STAT_REG_DECLARE
 145#undef _B44
 146};
 147
 148static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 149						dma_addr_t dma_base,
 150						unsigned long offset,
 151						enum dma_data_direction dir)
 152{
 153	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
 154				   dma_desc_sync_size, dir);
 155}
 156
 157static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 158					     dma_addr_t dma_base,
 159					     unsigned long offset,
 160					     enum dma_data_direction dir)
 161{
 162	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
 163				dma_desc_sync_size, dir);
 164}
 165
 166static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
 167{
 168	return ssb_read32(bp->sdev, reg);
 169}
 170
 171static inline void bw32(const struct b44 *bp,
 172			unsigned long reg, unsigned long val)
 173{
 174	ssb_write32(bp->sdev, reg, val);
 175}
 176
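/* Poll a register until the given bit clears (clear != 0) or becomes set
 * (clear == 0), waiting 10 usec between reads for at most 'timeout'
 * iterations.  Returns 0 on success, -ENODEV on timeout.
 */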
 177static int b44_wait_bit(struct b44 *bp, unsigned long reg,
 178			u32 bit, unsigned long timeout, const int clear)
 179{
 180	unsigned long i;
 181
 182	for (i = 0; i < timeout; i++) {
 183		u32 val = br32(bp, reg);
 184
 185		if (clear && !(val & bit))
 186			break;
 187		if (!clear && (val & bit))
 188			break;
 189		udelay(10);
 190	}
 191	if (i == timeout) {
 192		if (net_ratelimit())
 193			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
 194				   bit, reg, clear ? "clear" : "set");
 195
 196		return -ENODEV;
 197	}
 198	return 0;
 199}
 200
 201static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
 202{
 203	u32 val;
 204
 205	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
 206			    (index << CAM_CTRL_INDEX_SHIFT)));
 207
 208	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 209
 210	val = br32(bp, B44_CAM_DATA_LO);
 211
 212	data[2] = (val >> 24) & 0xFF;
 213	data[3] = (val >> 16) & 0xFF;
 214	data[4] = (val >> 8) & 0xFF;
 215	data[5] = (val >> 0) & 0xFF;
 216
 217	val = br32(bp, B44_CAM_DATA_HI);
 218
 219	data[0] = (val >> 8) & 0xFF;
 220	data[1] = (val >> 0) & 0xFF;
 221}
 222
 223static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
 224{
 225	u32 val;
 226
 227	val  = ((u32) data[2]) << 24;
 228	val |= ((u32) data[3]) << 16;
 229	val |= ((u32) data[4]) <<  8;
 230	val |= ((u32) data[5]) <<  0;
 231	bw32(bp, B44_CAM_DATA_LO, val);
 232	val = (CAM_DATA_HI_VALID |
 233	       (((u32) data[0]) << 8) |
 234	       (((u32) data[1]) << 0));
 235	bw32(bp, B44_CAM_DATA_HI, val);
 236	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
 237			    (index << CAM_CTRL_INDEX_SHIFT)));
 238	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 239}
 240
 241static inline void __b44_disable_ints(struct b44 *bp)
 242{
 243	bw32(bp, B44_IMASK, 0);
 244}
 245
 246static void b44_disable_ints(struct b44 *bp)
 247{
 248	__b44_disable_ints(bp);
 249
 250	/* Flush posted writes. */
 251	br32(bp, B44_IMASK);
 252}
 253
 254static void b44_enable_ints(struct b44 *bp)
 255{
 256	bw32(bp, B44_IMASK, bp->imask);
 257}
 258
 259static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
 260{
 261	int err;
 262
 263	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
 264	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 265			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
 266			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
 267			     (reg << MDIO_DATA_RA_SHIFT) |
 268			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
 269	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
 270	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
 271
 272	return err;
 273}
 274
 275static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
 276{
 277	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
 278	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 279			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
 280			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
 281			     (reg << MDIO_DATA_RA_SHIFT) |
 282			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
 283			     (val & MDIO_DATA_DATA)));
 284	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
 285}
 286
 287static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
 288{
 289	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 290		return 0;
 291
 292	return __b44_readphy(bp, bp->phy_addr, reg, val);
 293}
 294
 295static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
 296{
 297	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 298		return 0;
 299
 300	return __b44_writephy(bp, bp->phy_addr, reg, val);
 301}
 302
 303/* miilib interface */
 304static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
 305{
 306	u32 val;
 307	struct b44 *bp = netdev_priv(dev);
 308	int rc = __b44_readphy(bp, phy_id, location, &val);
 309	if (rc)
 310		return 0xffffffff;
 311	return val;
 312}
 313
 314static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
 315			       int val)
 316{
 317	struct b44 *bp = netdev_priv(dev);
 318	__b44_writephy(bp, phy_id, location, val);
 319}
 320
 321static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
 322{
 323	u32 val;
 324	struct b44 *bp = bus->priv;
 325	int rc = __b44_readphy(bp, phy_id, location, &val);
 326	if (rc)
 327		return 0xffffffff;
 328	return val;
 329}
 330
 331static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
 332				 u16 val)
 333{
 334	struct b44 *bp = bus->priv;
 335	return __b44_writephy(bp, phy_id, location, val);
 336}
 337
 338static int b44_phy_reset(struct b44 *bp)
 339{
 340	u32 val;
 341	int err;
 342
 343	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 344		return 0;
 345	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
 346	if (err)
 347		return err;
 348	udelay(100);
 349	err = b44_readphy(bp, MII_BMCR, &val);
 350	if (!err) {
 351		if (val & BMCR_RESET) {
 352			netdev_err(bp->dev, "PHY Reset would not complete\n");
 353			err = -ENODEV;
 354		}
 355	}
 356
 357	return err;
 358}
 359
 360static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
 361{
 362	u32 val;
 363
 364	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
 365	bp->flags |= pause_flags;
 366
 367	val = br32(bp, B44_RXCONFIG);
 368	if (pause_flags & B44_FLAG_RX_PAUSE)
 369		val |= RXCONFIG_FLOW;
 370	else
 371		val &= ~RXCONFIG_FLOW;
 372	bw32(bp, B44_RXCONFIG, val);
 373
 374	val = br32(bp, B44_MAC_FLOW);
 375	if (pause_flags & B44_FLAG_TX_PAUSE)
 376		val |= (MAC_FLOW_PAUSE_ENAB |
 377			(0xc0 & MAC_FLOW_RX_HI_WATER));
 378	else
 379		val &= ~MAC_FLOW_PAUSE_ENAB;
 380	bw32(bp, B44_MAC_FLOW, val);
 381}
 382
 383static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
 384{
 385	u32 pause_enab = 0;
 386
 387	/* The driver supports only rx pause by default because
 388	   the b44 mac tx pause mechanism generates excessive
 389	   pause frames.
 390	   Use ethtool to turn on b44 tx pause if necessary.
 391	 */
 392	if ((local & ADVERTISE_PAUSE_CAP) &&
 393	    (local & ADVERTISE_PAUSE_ASYM)){
 394		if ((remote & LPA_PAUSE_ASYM) &&
 395		    !(remote & LPA_PAUSE_CAP))
 396			pause_enab |= B44_FLAG_RX_PAUSE;
 397	}
 398
 399	__b44_set_flow_ctrl(bp, pause_enab);
 400}
 401
 402#ifdef CONFIG_BCM47XX
 403#include <bcm47xx_nvram.h>
 404static void b44_wap54g10_workaround(struct b44 *bp)
 405{
 406	char buf[20];
 407	u32 val;
 408	int err;
 409
 410	/*
 411	 * workaround for bad hardware design in Linksys WAP54G v1.0
 412	 * see https://dev.openwrt.org/ticket/146
 413	 * check and reset bit "isolate"
 414	 */
 415	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
 416		return;
 417	if (simple_strtoul(buf, NULL, 0) == 2) {
 418		err = __b44_readphy(bp, 0, MII_BMCR, &val);
 419		if (err)
 420			goto error;
 421		if (!(val & BMCR_ISOLATE))
 422			return;
 423		val &= ~BMCR_ISOLATE;
 424		err = __b44_writephy(bp, 0, MII_BMCR, val);
 425		if (err)
 426			goto error;
 427	}
 428	return;
 429error:
 430	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
 431}
 432#else
 433static inline void b44_wap54g10_workaround(struct b44 *bp)
 434{
 435}
 436#endif
 437
 438static int b44_setup_phy(struct b44 *bp)
 439{
 440	u32 val;
 441	int err;
 442
 443	b44_wap54g10_workaround(bp);
 444
 445	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 446		return 0;
 447	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
 448		goto out;
 449	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
 450				val & MII_ALEDCTRL_ALLMSK)) != 0)
 451		goto out;
 452	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
 453		goto out;
 454	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
 455				val | MII_TLEDCTRL_ENABLE)) != 0)
 456		goto out;
 457
 458	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
 459		u32 adv = ADVERTISE_CSMA;
 460
 461		if (bp->flags & B44_FLAG_ADV_10HALF)
 462			adv |= ADVERTISE_10HALF;
 463		if (bp->flags & B44_FLAG_ADV_10FULL)
 464			adv |= ADVERTISE_10FULL;
 465		if (bp->flags & B44_FLAG_ADV_100HALF)
 466			adv |= ADVERTISE_100HALF;
 467		if (bp->flags & B44_FLAG_ADV_100FULL)
 468			adv |= ADVERTISE_100FULL;
 469
 470		if (bp->flags & B44_FLAG_PAUSE_AUTO)
 471			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 472
 473		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
 474			goto out;
 475		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
 476						       BMCR_ANRESTART))) != 0)
 477			goto out;
 478	} else {
 479		u32 bmcr;
 480
 481		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
 482			goto out;
 483		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
 484		if (bp->flags & B44_FLAG_100_BASE_T)
 485			bmcr |= BMCR_SPEED100;
 486		if (bp->flags & B44_FLAG_FULL_DUPLEX)
 487			bmcr |= BMCR_FULLDPLX;
 488		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
 489			goto out;
 490
 491		/* Since we will not be negotiating there is no safe way
 492		 * to determine if the link partner supports flow control
 493		 * or not.  So just disable it completely in this case.
 494		 */
 495		b44_set_flow_ctrl(bp, 0, 0);
 496	}
 497
 498out:
 499	return err;
 500}
 501
 502static void b44_stats_update(struct b44 *bp)
 503{
 504	unsigned long reg;
 505	u64 *val;
 506
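	/* The MIB counter registers are laid out in the same order as the
	 * u64 fields of struct b44_hw_stats, so walk both in lock step.
	 */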
 507	val = &bp->hw_stats.tx_good_octets;
 508	u64_stats_update_begin(&bp->hw_stats.syncp);
 509
 510	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
 511		*val++ += br32(bp, reg);
 512	}
 513
 514	/* Pad */
 515	reg += 8*4UL;
 516
 517	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
 518		*val++ += br32(bp, reg);
 519	}
 520
 521	u64_stats_update_end(&bp->hw_stats.syncp);
 522}
 523
 524static void b44_link_report(struct b44 *bp)
 525{
 526	if (!netif_carrier_ok(bp->dev)) {
 527		netdev_info(bp->dev, "Link is down\n");
 528	} else {
 529		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
 530			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
 531			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
 532
 533		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
 534			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
 535			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
 536	}
 537}
 538
 539static void b44_check_phy(struct b44 *bp)
 540{
 541	u32 bmsr, aux;
 542
 543	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
 544		bp->flags |= B44_FLAG_100_BASE_T;
 545		if (!netif_carrier_ok(bp->dev)) {
 546			u32 val = br32(bp, B44_TX_CTRL);
 547			if (bp->flags & B44_FLAG_FULL_DUPLEX)
 548				val |= TX_CTRL_DUPLEX;
 549			else
 550				val &= ~TX_CTRL_DUPLEX;
 551			bw32(bp, B44_TX_CTRL, val);
 552			netif_carrier_on(bp->dev);
 553			b44_link_report(bp);
 554		}
 555		return;
 556	}
 557
 558	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
 559	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
 560	    (bmsr != 0xffff)) {
 561		if (aux & MII_AUXCTRL_SPEED)
 562			bp->flags |= B44_FLAG_100_BASE_T;
 563		else
 564			bp->flags &= ~B44_FLAG_100_BASE_T;
 565		if (aux & MII_AUXCTRL_DUPLEX)
 566			bp->flags |= B44_FLAG_FULL_DUPLEX;
 567		else
 568			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
 569
 570		if (!netif_carrier_ok(bp->dev) &&
 571		    (bmsr & BMSR_LSTATUS)) {
 572			u32 val = br32(bp, B44_TX_CTRL);
 573			u32 local_adv, remote_adv;
 574
 575			if (bp->flags & B44_FLAG_FULL_DUPLEX)
 576				val |= TX_CTRL_DUPLEX;
 577			else
 578				val &= ~TX_CTRL_DUPLEX;
 579			bw32(bp, B44_TX_CTRL, val);
 580
 581			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
 582			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
 583			    !b44_readphy(bp, MII_LPA, &remote_adv))
 584				b44_set_flow_ctrl(bp, local_adv, remote_adv);
 585
 586			/* Link now up */
 587			netif_carrier_on(bp->dev);
 588			b44_link_report(bp);
 589		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
 590			/* Link now down */
 591			netif_carrier_off(bp->dev);
 592			b44_link_report(bp);
 593		}
 594
 595		if (bmsr & BMSR_RFAULT)
 596			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
 597		if (bmsr & BMSR_JCD)
 598			netdev_warn(bp->dev, "Jabber detected in PHY\n");
 599	}
 600}
 601
 602static void b44_timer(unsigned long __opaque)
 603{
 604	struct b44 *bp = (struct b44 *) __opaque;
 605
 606	spin_lock_irq(&bp->lock);
 607
 608	b44_check_phy(bp);
 609
 610	b44_stats_update(bp);
 611
 612	spin_unlock_irq(&bp->lock);
 613
 614	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
 615}
 616
 617static void b44_tx(struct b44 *bp)
 618{
 619	u32 cur, cons;
 620	unsigned bytes_compl = 0, pkts_compl = 0;
 621
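	/* The DMA TX status register reports the hardware's current position
	 * as a byte offset into the descriptor ring; convert it to an index.
	 */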
 622	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 623	cur /= sizeof(struct dma_desc);
 624
 625	/* XXX needs updating when NETIF_F_SG is supported */
 626	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
 627		struct ring_info *rp = &bp->tx_buffers[cons];
 628		struct sk_buff *skb = rp->skb;
 629
 630		BUG_ON(skb == NULL);
 631
 632		dma_unmap_single(bp->sdev->dma_dev,
 633				 rp->mapping,
 634				 skb->len,
 635				 DMA_TO_DEVICE);
 636		rp->skb = NULL;
 637
 638		bytes_compl += skb->len;
 639		pkts_compl++;
 640
 641		dev_kfree_skb_irq(skb);
 642	}
 643
 644	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
 645	bp->tx_cons = cons;
 646	if (netif_queue_stopped(bp->dev) &&
 647	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
 648		netif_wake_queue(bp->dev);
 649
 650	bw32(bp, B44_GPTIMER, 0);
 651}
 652
  653/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 654 * before the DMA address you give it.  So we allocate 30 more bytes
 655 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 656 * point the chip at 30 bytes past where the rx_header will go.
 657 */
 658static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 659{
 660	struct dma_desc *dp;
 661	struct ring_info *src_map, *map;
 662	struct rx_header *rh;
 663	struct sk_buff *skb;
 664	dma_addr_t mapping;
 665	int dest_idx;
 666	u32 ctrl;
 667
 668	src_map = NULL;
 669	if (src_idx >= 0)
 670		src_map = &bp->rx_buffers[src_idx];
 671	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 672	map = &bp->rx_buffers[dest_idx];
 673	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
 674	if (skb == NULL)
 675		return -ENOMEM;
 676
 677	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 678				 RX_PKT_BUF_SZ,
 679				 DMA_FROM_DEVICE);
 680
 681	/* Hardware bug work-around, the chip is unable to do PCI DMA
 682	   to/from anything above 1GB :-( */
 683	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 684		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 685		/* Sigh... */
 686		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 687			dma_unmap_single(bp->sdev->dma_dev, mapping,
 688					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 689		dev_kfree_skb_any(skb);
 690		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
 691		if (skb == NULL)
 692			return -ENOMEM;
 693		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 694					 RX_PKT_BUF_SZ,
 695					 DMA_FROM_DEVICE);
 696		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 697		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 698			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 699				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 700			dev_kfree_skb_any(skb);
 701			return -ENOMEM;
 702		}
 703		bp->force_copybreak = 1;
 704	}
 705
 706	rh = (struct rx_header *) skb->data;
 707
 708	rh->len = 0;
 709	rh->flags = 0;
 710
 711	map->skb = skb;
 712	map->mapping = mapping;
 713
 714	if (src_map != NULL)
 715		src_map->skb = NULL;
 716
 717	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
 718	if (dest_idx == (B44_RX_RING_SIZE - 1))
 719		ctrl |= DESC_CTRL_EOT;
 720
 721	dp = &bp->rx_ring[dest_idx];
 722	dp->ctrl = cpu_to_le32(ctrl);
 723	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 724
 725	if (bp->flags & B44_FLAG_RX_RING_HACK)
 726		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
 727			                    dest_idx * sizeof(*dp),
 728			                    DMA_BIDIRECTIONAL);
 729
 730	return RX_PKT_BUF_SZ;
 731}
 732
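/* Reuse an already-mapped RX buffer for a new ring slot instead of
 * allocating a fresh skb; used when a packet is dropped or copied out.
 */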
 733static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 734{
 735	struct dma_desc *src_desc, *dest_desc;
 736	struct ring_info *src_map, *dest_map;
 737	struct rx_header *rh;
 738	int dest_idx;
 739	__le32 ctrl;
 740
 741	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 742	dest_desc = &bp->rx_ring[dest_idx];
 743	dest_map = &bp->rx_buffers[dest_idx];
 744	src_desc = &bp->rx_ring[src_idx];
 745	src_map = &bp->rx_buffers[src_idx];
 746
 747	dest_map->skb = src_map->skb;
 748	rh = (struct rx_header *) src_map->skb->data;
 749	rh->len = 0;
 750	rh->flags = 0;
 751	dest_map->mapping = src_map->mapping;
 752
 753	if (bp->flags & B44_FLAG_RX_RING_HACK)
 754		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
 755			                 src_idx * sizeof(*src_desc),
 756			                 DMA_BIDIRECTIONAL);
 757
 758	ctrl = src_desc->ctrl;
 759	if (dest_idx == (B44_RX_RING_SIZE - 1))
 760		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
 761	else
 762		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
 763
 764	dest_desc->ctrl = ctrl;
 765	dest_desc->addr = src_desc->addr;
 766
 767	src_map->skb = NULL;
 768
 769	if (bp->flags & B44_FLAG_RX_RING_HACK)
 770		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
 771					     dest_idx * sizeof(*dest_desc),
 772					     DMA_BIDIRECTIONAL);
 773
 774	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
 775				   RX_PKT_BUF_SZ,
 776				   DMA_FROM_DEVICE);
 777}
 778
 779static int b44_rx(struct b44 *bp, int budget)
 780{
 781	int received;
 782	u32 cons, prod;
 783
 784	received = 0;
 785	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
 786	prod /= sizeof(struct dma_desc);
 787	cons = bp->rx_cons;
 788
 789	while (cons != prod && budget > 0) {
 790		struct ring_info *rp = &bp->rx_buffers[cons];
 791		struct sk_buff *skb = rp->skb;
 792		dma_addr_t map = rp->mapping;
 793		struct rx_header *rh;
 794		u16 len;
 795
 796		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
 797					RX_PKT_BUF_SZ,
 798					DMA_FROM_DEVICE);
 799		rh = (struct rx_header *) skb->data;
 800		len = le16_to_cpu(rh->len);
 801		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
 802		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 803		drop_it:
 804			b44_recycle_rx(bp, cons, bp->rx_prod);
 805		drop_it_no_recycle:
 806			bp->dev->stats.rx_dropped++;
 807			goto next_pkt;
 808		}
 809
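		/* A zero length presumably means the chip has not finished
		 * writing the rx_header yet; re-read it a few times before
		 * giving up and dropping the packet.
		 */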
 810		if (len == 0) {
 811			int i = 0;
 812
 813			do {
 814				udelay(2);
 815				barrier();
 816				len = le16_to_cpu(rh->len);
 817			} while (len == 0 && i++ < 5);
 818			if (len == 0)
 819				goto drop_it;
 820		}
 821
 822		/* Omit CRC. */
 823		len -= 4;
 824
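		/* Large frames: hand the mapped buffer straight up the stack
		 * and refill this ring slot with a fresh skb.  Small frames
		 * (or all frames once force_copybreak is set) are copied so
		 * the existing buffer can be recycled in place.
		 */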
 825		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
 826			int skb_size;
 827			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 828			if (skb_size < 0)
 829				goto drop_it;
 830			dma_unmap_single(bp->sdev->dma_dev, map,
 831					 skb_size, DMA_FROM_DEVICE);
 832			/* Leave out rx_header */
 833			skb_put(skb, len + RX_PKT_OFFSET);
 834			skb_pull(skb, RX_PKT_OFFSET);
 835		} else {
 836			struct sk_buff *copy_skb;
 837
 838			b44_recycle_rx(bp, cons, bp->rx_prod);
 839			copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
 840			if (copy_skb == NULL)
 841				goto drop_it_no_recycle;
 842
 843			skb_put(copy_skb, len);
 844			/* DMA sync done above, copy just the actual packet */
 845			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
 846							 copy_skb->data, len);
 847			skb = copy_skb;
 848		}
 849		skb_checksum_none_assert(skb);
 850		skb->protocol = eth_type_trans(skb, bp->dev);
 851		netif_receive_skb(skb);
 852		received++;
 853		budget--;
 854	next_pkt:
 855		bp->rx_prod = (bp->rx_prod + 1) &
 856			(B44_RX_RING_SIZE - 1);
 857		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
 858	}
 859
 860	bp->rx_cons = cons;
 861	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
 862
 863	return received;
 864}
 865
 866static int b44_poll(struct napi_struct *napi, int budget)
 867{
 868	struct b44 *bp = container_of(napi, struct b44, napi);
 869	int work_done;
 870	unsigned long flags;
 871
 872	spin_lock_irqsave(&bp->lock, flags);
 873
 874	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
 875		/* spin_lock(&bp->tx_lock); */
 876		b44_tx(bp);
 877		/* spin_unlock(&bp->tx_lock); */
 878	}
 879	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
 880		bp->istat &= ~ISTAT_RFO;
 881		b44_disable_ints(bp);
 882		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
 883		b44_init_rings(bp);
 884		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
 885		netif_wake_queue(bp->dev);
 886	}
 887
 888	spin_unlock_irqrestore(&bp->lock, flags);
 889
 890	work_done = 0;
 891	if (bp->istat & ISTAT_RX)
 892		work_done += b44_rx(bp, budget);
 893
 894	if (bp->istat & ISTAT_ERRORS) {
 895		spin_lock_irqsave(&bp->lock, flags);
 896		b44_halt(bp);
 897		b44_init_rings(bp);
 898		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
 899		netif_wake_queue(bp->dev);
 900		spin_unlock_irqrestore(&bp->lock, flags);
 901		work_done = 0;
 902	}
 903
 904	if (work_done < budget) {
 905		napi_complete(napi);
 906		b44_enable_ints(bp);
 907	}
 908
 909	return work_done;
 910}
 911
 912static irqreturn_t b44_interrupt(int irq, void *dev_id)
 913{
 914	struct net_device *dev = dev_id;
 915	struct b44 *bp = netdev_priv(dev);
 916	u32 istat, imask;
 917	int handled = 0;
 918
 919	spin_lock(&bp->lock);
 920
 921	istat = br32(bp, B44_ISTAT);
 922	imask = br32(bp, B44_IMASK);
 923
 924	/* The interrupt mask register controls which interrupt bits
 925	 * will actually raise an interrupt to the CPU when set by hw/firmware,
 926	 * but doesn't mask off the bits.
 927	 */
 928	istat &= imask;
 929	if (istat) {
 930		handled = 1;
 931
 932		if (unlikely(!netif_running(dev))) {
 933			netdev_info(dev, "late interrupt\n");
 934			goto irq_ack;
 935		}
 936
 937		if (napi_schedule_prep(&bp->napi)) {
 938			/* NOTE: These writes are posted by the readback of
 939			 *       the ISTAT register below.
 940			 */
 941			bp->istat = istat;
 942			__b44_disable_ints(bp);
 943			__napi_schedule(&bp->napi);
 944		}
 945
 946irq_ack:
 947		bw32(bp, B44_ISTAT, istat);
 948		br32(bp, B44_ISTAT);
 949	}
 950	spin_unlock(&bp->lock);
 951	return IRQ_RETVAL(handled);
 952}
 953
 954static void b44_tx_timeout(struct net_device *dev)
 955{
 956	struct b44 *bp = netdev_priv(dev);
 957
 958	netdev_err(dev, "transmit timed out, resetting\n");
 959
 960	spin_lock_irq(&bp->lock);
 961
 962	b44_halt(bp);
 963	b44_init_rings(bp);
 964	b44_init_hw(bp, B44_FULL_RESET);
 965
 966	spin_unlock_irq(&bp->lock);
 967
 968	b44_enable_ints(bp);
 969
 970	netif_wake_queue(dev);
 971}
 972
 973static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 974{
 975	struct b44 *bp = netdev_priv(dev);
 976	int rc = NETDEV_TX_OK;
 977	dma_addr_t mapping;
 978	u32 len, entry, ctrl;
 979	unsigned long flags;
 980
 981	len = skb->len;
 982	spin_lock_irqsave(&bp->lock, flags);
 983
 984	/* This is a hard error, log it. */
 985	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
 986		netif_stop_queue(dev);
 987		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 988		goto err_out;
 989	}
 990
 991	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
 992	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 993		struct sk_buff *bounce_skb;
 994
 995		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 996		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 997			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 998					     DMA_TO_DEVICE);
 999
1000		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
1001		if (!bounce_skb)
1002			goto err_out;
1003
1004		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
1005					 len, DMA_TO_DEVICE);
1006		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
1007			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
1008				dma_unmap_single(bp->sdev->dma_dev, mapping,
1009						     len, DMA_TO_DEVICE);
1010			dev_kfree_skb_any(bounce_skb);
1011			goto err_out;
1012		}
1013
1014		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1015		dev_kfree_skb_any(skb);
1016		skb = bounce_skb;
1017	}
1018
1019	entry = bp->tx_prod;
1020	bp->tx_buffers[entry].skb = skb;
1021	bp->tx_buffers[entry].mapping = mapping;
1022
1023	ctrl  = (len & DESC_CTRL_LEN);
1024	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1025	if (entry == (B44_TX_RING_SIZE - 1))
1026		ctrl |= DESC_CTRL_EOT;
1027
1028	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1029	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1030
1031	if (bp->flags & B44_FLAG_TX_RING_HACK)
1032		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1033			                    entry * sizeof(bp->tx_ring[0]),
1034			                    DMA_TO_DEVICE);
1035
1036	entry = NEXT_TX(entry);
1037
1038	bp->tx_prod = entry;
1039
1040	wmb();
1041
1042	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1043	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1044		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1045	if (bp->flags & B44_FLAG_REORDER_BUG)
1046		br32(bp, B44_DMATX_PTR);
1047
1048	netdev_sent_queue(dev, skb->len);
1049
1050	if (TX_BUFFS_AVAIL(bp) < 1)
1051		netif_stop_queue(dev);
1052
1053out_unlock:
1054	spin_unlock_irqrestore(&bp->lock, flags);
1055
1056	return rc;
1057
1058err_out:
1059	rc = NETDEV_TX_BUSY;
1060	goto out_unlock;
1061}
1062
1063static int b44_change_mtu(struct net_device *dev, int new_mtu)
1064{
1065	struct b44 *bp = netdev_priv(dev);
1066
1067	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1068		return -EINVAL;
1069
1070	if (!netif_running(dev)) {
1071		/* We'll just catch it later when the
1072		 * device is up'd.
1073		 */
1074		dev->mtu = new_mtu;
1075		return 0;
1076	}
1077
1078	spin_lock_irq(&bp->lock);
1079	b44_halt(bp);
1080	dev->mtu = new_mtu;
1081	b44_init_rings(bp);
1082	b44_init_hw(bp, B44_FULL_RESET);
1083	spin_unlock_irq(&bp->lock);
1084
1085	b44_enable_ints(bp);
1086
1087	return 0;
1088}
1089
1090/* Free up pending packets in all rx/tx rings.
1091 *
1092 * The chip has been shut down and the driver detached from
1093 * the networking, so no interrupts or new tx packets will
1094 * end up in the driver.  bp->lock is not held and we are not
1095 * in an interrupt context and thus may sleep.
1096 */
1097static void b44_free_rings(struct b44 *bp)
1098{
1099	struct ring_info *rp;
1100	int i;
1101
1102	for (i = 0; i < B44_RX_RING_SIZE; i++) {
1103		rp = &bp->rx_buffers[i];
1104
1105		if (rp->skb == NULL)
1106			continue;
1107		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1108				 DMA_FROM_DEVICE);
1109		dev_kfree_skb_any(rp->skb);
1110		rp->skb = NULL;
1111	}
1112
1113	/* XXX needs changes once NETIF_F_SG is set... */
1114	for (i = 0; i < B44_TX_RING_SIZE; i++) {
1115		rp = &bp->tx_buffers[i];
1116
1117		if (rp->skb == NULL)
1118			continue;
1119		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1120				 DMA_TO_DEVICE);
1121		dev_kfree_skb_any(rp->skb);
1122		rp->skb = NULL;
1123	}
1124}
1125
1126/* Initialize tx/rx rings for packet processing.
1127 *
1128 * The chip has been shut down and the driver detached from
1129 * the networking, so no interrupts or new tx packets will
1130 * end up in the driver.
1131 */
1132static void b44_init_rings(struct b44 *bp)
1133{
1134	int i;
1135
1136	b44_free_rings(bp);
1137
1138	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1139	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1140
1141	if (bp->flags & B44_FLAG_RX_RING_HACK)
1142		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1143					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1144
1145	if (bp->flags & B44_FLAG_TX_RING_HACK)
1146		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1147					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
1148
1149	for (i = 0; i < bp->rx_pending; i++) {
1150		if (b44_alloc_rx_skb(bp, -1, i) < 0)
1151			break;
1152	}
1153}
1154
1155/*
1156 * Must not be invoked with interrupt sources disabled and
 1157 * the hardware shut down.
1158 */
1159static void b44_free_consistent(struct b44 *bp)
1160{
1161	kfree(bp->rx_buffers);
1162	bp->rx_buffers = NULL;
1163	kfree(bp->tx_buffers);
1164	bp->tx_buffers = NULL;
1165	if (bp->rx_ring) {
1166		if (bp->flags & B44_FLAG_RX_RING_HACK) {
1167			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1168					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1169			kfree(bp->rx_ring);
1170		} else
1171			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1172					  bp->rx_ring, bp->rx_ring_dma);
1173		bp->rx_ring = NULL;
1174		bp->flags &= ~B44_FLAG_RX_RING_HACK;
1175	}
1176	if (bp->tx_ring) {
1177		if (bp->flags & B44_FLAG_TX_RING_HACK) {
1178			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1179					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1180			kfree(bp->tx_ring);
1181		} else
1182			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1183					  bp->tx_ring, bp->tx_ring_dma);
1184		bp->tx_ring = NULL;
1185		bp->flags &= ~B44_FLAG_TX_RING_HACK;
1186	}
1187}
1188
1189/*
1190 * Must not be invoked with interrupt sources disabled and
 1191 * the hardware shut down.  Can sleep.
1192 */
1193static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1194{
1195	int size;
1196
1197	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1198	bp->rx_buffers = kzalloc(size, gfp);
1199	if (!bp->rx_buffers)
1200		goto out_err;
1201
1202	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1203	bp->tx_buffers = kzalloc(size, gfp);
1204	if (!bp->tx_buffers)
1205		goto out_err;
1206
1207	size = DMA_TABLE_BYTES;
1208	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1209					 &bp->rx_ring_dma, gfp);
1210	if (!bp->rx_ring) {
1211		/* Allocation may have failed due to pci_alloc_consistent
1212		   insisting on use of GFP_DMA, which is more restrictive
1213		   than necessary...  */
1214		struct dma_desc *rx_ring;
1215		dma_addr_t rx_ring_dma;
1216
1217		rx_ring = kzalloc(size, gfp);
1218		if (!rx_ring)
1219			goto out_err;
1220
1221		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1222					     DMA_TABLE_BYTES,
1223					     DMA_BIDIRECTIONAL);
1224
1225		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1226			rx_ring_dma + size > DMA_BIT_MASK(30)) {
1227			kfree(rx_ring);
1228			goto out_err;
1229		}
1230
1231		bp->rx_ring = rx_ring;
1232		bp->rx_ring_dma = rx_ring_dma;
1233		bp->flags |= B44_FLAG_RX_RING_HACK;
1234	}
1235
1236	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1237					 &bp->tx_ring_dma, gfp);
1238	if (!bp->tx_ring) {
1239		/* Allocation may have failed due to ssb_dma_alloc_consistent
1240		   insisting on use of GFP_DMA, which is more restrictive
1241		   than necessary...  */
1242		struct dma_desc *tx_ring;
1243		dma_addr_t tx_ring_dma;
1244
1245		tx_ring = kzalloc(size, gfp);
1246		if (!tx_ring)
1247			goto out_err;
1248
1249		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1250					     DMA_TABLE_BYTES,
1251					     DMA_TO_DEVICE);
1252
1253		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1254			tx_ring_dma + size > DMA_BIT_MASK(30)) {
1255			kfree(tx_ring);
1256			goto out_err;
1257		}
1258
1259		bp->tx_ring = tx_ring;
1260		bp->tx_ring_dma = tx_ring_dma;
1261		bp->flags |= B44_FLAG_TX_RING_HACK;
1262	}
1263
1264	return 0;
1265
1266out_err:
1267	b44_free_consistent(bp);
1268	return -ENOMEM;
1269}
1270
1271/* bp->lock is held. */
1272static void b44_clear_stats(struct b44 *bp)
1273{
1274	unsigned long reg;
1275
1276	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1277	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1278		br32(bp, reg);
1279	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1280		br32(bp, reg);
1281}
1282
1283/* bp->lock is held. */
1284static void b44_chip_reset(struct b44 *bp, int reset_kind)
1285{
1286	struct ssb_device *sdev = bp->sdev;
1287	bool was_enabled;
1288
1289	was_enabled = ssb_device_is_enabled(bp->sdev);
1290
1291	ssb_device_enable(bp->sdev, 0);
1292	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1293
1294	if (was_enabled) {
1295		bw32(bp, B44_RCV_LAZY, 0);
1296		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1297		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1298		bw32(bp, B44_DMATX_CTRL, 0);
1299		bp->tx_prod = bp->tx_cons = 0;
1300		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1301			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1302				     100, 0);
1303		}
1304		bw32(bp, B44_DMARX_CTRL, 0);
1305		bp->rx_prod = bp->rx_cons = 0;
1306	}
1307
1308	b44_clear_stats(bp);
1309
1310	/*
 1311	 * Don't enable PHY if we are doing a partial reset;
 1312	 * we are probably going to power down.
1313	 */
1314	if (reset_kind == B44_CHIP_RESET_PARTIAL)
1315		return;
1316
1317	switch (sdev->bus->bustype) {
1318	case SSB_BUSTYPE_SSB:
1319		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1320		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1321					B44_MDC_RATIO)
1322		     & MDIO_CTRL_MAXF_MASK)));
1323		break;
1324	case SSB_BUSTYPE_PCI:
1325		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1326		     (0x0d & MDIO_CTRL_MAXF_MASK)));
1327		break;
1328	case SSB_BUSTYPE_PCMCIA:
1329	case SSB_BUSTYPE_SDIO:
1330		WARN_ON(1); /* A device with this bus does not exist. */
1331		break;
1332	}
1333
1334	br32(bp, B44_MDIO_CTRL);
1335
1336	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1337		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1338		br32(bp, B44_ENET_CTRL);
1339		bp->flags |= B44_FLAG_EXTERNAL_PHY;
1340	} else {
1341		u32 val = br32(bp, B44_DEVCTRL);
1342
1343		if (val & DEVCTRL_EPR) {
1344			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1345			br32(bp, B44_DEVCTRL);
1346			udelay(100);
1347		}
1348		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1349	}
1350}
1351
1352/* bp->lock is held. */
1353static void b44_halt(struct b44 *bp)
1354{
1355	b44_disable_ints(bp);
1356	/* reset PHY */
1357	b44_phy_reset(bp);
1358	/* power down PHY */
1359	netdev_info(bp->dev, "powering down PHY\n");
1360	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1361	/* now reset the chip, but without enabling the MAC&PHY
1362	 * part of it. This has to be done _after_ we shut down the PHY */
1363	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1364		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1365	else
1366		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1367}
1368
1369/* bp->lock is held. */
1370static void __b44_set_mac_addr(struct b44 *bp)
1371{
1372	bw32(bp, B44_CAM_CTRL, 0);
1373	if (!(bp->dev->flags & IFF_PROMISC)) {
1374		u32 val;
1375
1376		__b44_cam_write(bp, bp->dev->dev_addr, 0);
1377		val = br32(bp, B44_CAM_CTRL);
1378		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1379	}
1380}
1381
1382static int b44_set_mac_addr(struct net_device *dev, void *p)
1383{
1384	struct b44 *bp = netdev_priv(dev);
1385	struct sockaddr *addr = p;
1386	u32 val;
1387
1388	if (netif_running(dev))
1389		return -EBUSY;
1390
1391	if (!is_valid_ether_addr(addr->sa_data))
1392		return -EINVAL;
1393
1394	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1395
1396	spin_lock_irq(&bp->lock);
1397
1398	val = br32(bp, B44_RXCONFIG);
1399	if (!(val & RXCONFIG_CAM_ABSENT))
1400		__b44_set_mac_addr(bp);
1401
1402	spin_unlock_irq(&bp->lock);
1403
1404	return 0;
1405}
1406
1407/* Called at device open time to get the chip ready for
1408 * packet processing.  Invoked with bp->lock held.
1409 */
1410static void __b44_set_rx_mode(struct net_device *);
1411static void b44_init_hw(struct b44 *bp, int reset_kind)
1412{
1413	u32 val;
1414
1415	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1416	if (reset_kind == B44_FULL_RESET) {
1417		b44_phy_reset(bp);
1418		b44_setup_phy(bp);
1419	}
1420
1421	/* Enable CRC32, set proper LED modes and power on PHY */
1422	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1423	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1424
1425	/* This sets the MAC address too.  */
1426	__b44_set_rx_mode(bp->dev);
1427
1428	/* MTU + eth header + possible VLAN tag + struct rx_header */
1429	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1430	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1431
1432	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1433	if (reset_kind == B44_PARTIAL_RESET) {
1434		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1435				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1436	} else {
1437		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1438		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1439		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1440				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1441		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1442
1443		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1444		bp->rx_prod = bp->rx_pending;
1445
1446		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1447	}
1448
1449	val = br32(bp, B44_ENET_CTRL);
1450	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1451
1452	netdev_reset_queue(bp->dev);
1453}
1454
1455static int b44_open(struct net_device *dev)
1456{
1457	struct b44 *bp = netdev_priv(dev);
1458	int err;
1459
1460	err = b44_alloc_consistent(bp, GFP_KERNEL);
1461	if (err)
1462		goto out;
1463
1464	napi_enable(&bp->napi);
1465
1466	b44_init_rings(bp);
1467	b44_init_hw(bp, B44_FULL_RESET);
1468
1469	b44_check_phy(bp);
1470
1471	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1472	if (unlikely(err < 0)) {
1473		napi_disable(&bp->napi);
1474		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1475		b44_free_rings(bp);
1476		b44_free_consistent(bp);
1477		goto out;
1478	}
1479
1480	init_timer(&bp->timer);
1481	bp->timer.expires = jiffies + HZ;
1482	bp->timer.data = (unsigned long) bp;
1483	bp->timer.function = b44_timer;
1484	add_timer(&bp->timer);
1485
1486	b44_enable_ints(bp);
1487
1488	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1489		phy_start(bp->phydev);
1490
1491	netif_start_queue(dev);
1492out:
1493	return err;
1494}
1495
1496#ifdef CONFIG_NET_POLL_CONTROLLER
1497/*
1498 * Polling receive - used by netconsole and other diagnostic tools
1499 * to allow network i/o with interrupts disabled.
1500 */
1501static void b44_poll_controller(struct net_device *dev)
1502{
1503	disable_irq(dev->irq);
1504	b44_interrupt(dev->irq, dev);
1505	enable_irq(dev->irq);
1506}
1507#endif
1508
1509static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1510{
1511	u32 i;
1512	u32 *pattern = (u32 *) pp;
1513
1514	for (i = 0; i < bytes; i += sizeof(u32)) {
1515		bw32(bp, B44_FILT_ADDR, table_offset + i);
1516		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1517	}
1518}
1519
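/* Build one magic-packet match pattern starting at 'offset': six 0xff sync
 * bytes followed by repetitions of the MAC address, setting the corresponding
 * bits in the byte mask as we go.  Returns the resulting pattern length minus
 * one, which is the encoding the B44_WKUP_LEN register expects (see caller).
 */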
1520static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1521{
1522	int magicsync = 6;
1523	int k, j, len = offset;
1524	int ethaddr_bytes = ETH_ALEN;
1525
1526	memset(ppattern + offset, 0xff, magicsync);
1527	for (j = 0; j < magicsync; j++)
1528		set_bit(len++, (unsigned long *) pmask);
1529
1530	for (j = 0; j < B44_MAX_PATTERNS; j++) {
1531		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1532			ethaddr_bytes = ETH_ALEN;
1533		else
1534			ethaddr_bytes = B44_PATTERN_SIZE - len;
 1535		if (ethaddr_bytes <= 0)
 1536			break;
 1537		for (k = 0; k < ethaddr_bytes; k++) {
1538			ppattern[offset + magicsync +
1539				(j * ETH_ALEN) + k] = macaddr[k];
1540			set_bit(len++, (unsigned long *) pmask);
1541		}
1542	}
1543	return len - 1;
1544}
1545
1546/* Setup magic packet patterns in the b44 WOL
1547 * pattern matching filter.
1548 */
1549static void b44_setup_pseudo_magicp(struct b44 *bp)
1550{
1551
1552	u32 val;
1553	int plen0, plen1, plen2;
1554	u8 *pwol_pattern;
1555	u8 pwol_mask[B44_PMASK_SIZE];
1556
1557	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1558	if (!pwol_pattern)
1559		return;
1560
1561	/* Ipv4 magic packet pattern - pattern 0.*/
1562	memset(pwol_mask, 0, B44_PMASK_SIZE);
1563	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1564				  B44_ETHIPV4UDP_HLEN);
1565
1566   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1567   	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1568
1569	/* Raw ethernet II magic packet pattern - pattern 1 */
1570	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1571	memset(pwol_mask, 0, B44_PMASK_SIZE);
1572	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1573				  ETH_HLEN);
1574
1575   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1576		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1577  	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1578		       B44_PMASK_BASE + B44_PMASK_SIZE);
1579
1580	/* Ipv6 magic packet pattern - pattern 2 */
1581	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1582	memset(pwol_mask, 0, B44_PMASK_SIZE);
1583	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1584				  B44_ETHIPV6UDP_HLEN);
1585
1586   	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1587		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1588  	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1589		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1590
1591	kfree(pwol_pattern);
1592
 1593	/* set these patterns' lengths: one less than each real length */
1594	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1595	bw32(bp, B44_WKUP_LEN, val);
1596
1597	/* enable wakeup pattern matching */
1598	val = br32(bp, B44_DEVCTRL);
1599	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1600
1601}
1602
1603#ifdef CONFIG_B44_PCI
1604static void b44_setup_wol_pci(struct b44 *bp)
1605{
1606	u16 val;
1607
1608	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1609		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1610		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1611		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1612	}
1613}
1614#else
1615static inline void b44_setup_wol_pci(struct b44 *bp) { }
1616#endif /* CONFIG_B44_PCI */
1617
1618static void b44_setup_wol(struct b44 *bp)
1619{
1620	u32 val;
1621
1622	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1623
1624	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1625
1626		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1627
1628		val = bp->dev->dev_addr[2] << 24 |
1629			bp->dev->dev_addr[3] << 16 |
1630			bp->dev->dev_addr[4] << 8 |
1631			bp->dev->dev_addr[5];
1632		bw32(bp, B44_ADDR_LO, val);
1633
1634		val = bp->dev->dev_addr[0] << 8 |
1635			bp->dev->dev_addr[1];
1636		bw32(bp, B44_ADDR_HI, val);
1637
1638		val = br32(bp, B44_DEVCTRL);
1639		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1640
1641 	} else {
1642 		b44_setup_pseudo_magicp(bp);
1643 	}
1644	b44_setup_wol_pci(bp);
1645}
1646
1647static int b44_close(struct net_device *dev)
1648{
1649	struct b44 *bp = netdev_priv(dev);
1650
1651	netif_stop_queue(dev);
1652
1653	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1654		phy_stop(bp->phydev);
1655
1656	napi_disable(&bp->napi);
1657
1658	del_timer_sync(&bp->timer);
1659
1660	spin_lock_irq(&bp->lock);
1661
1662	b44_halt(bp);
1663	b44_free_rings(bp);
1664	netif_carrier_off(dev);
1665
1666	spin_unlock_irq(&bp->lock);
1667
1668	free_irq(dev->irq, dev);
1669
1670	if (bp->flags & B44_FLAG_WOL_ENABLE) {
1671		b44_init_hw(bp, B44_PARTIAL_RESET);
1672		b44_setup_wol(bp);
1673	}
1674
1675	b44_free_consistent(bp);
1676
1677	return 0;
1678}
1679
1680static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1681					struct rtnl_link_stats64 *nstat)
1682{
1683	struct b44 *bp = netdev_priv(dev);
1684	struct b44_hw_stats *hwstat = &bp->hw_stats;
1685	unsigned int start;
1686
1687	do {
1688		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
1689
1690		/* Convert HW stats into rtnl_link_stats64 stats. */
1691		nstat->rx_packets = hwstat->rx_pkts;
1692		nstat->tx_packets = hwstat->tx_pkts;
1693		nstat->rx_bytes   = hwstat->rx_octets;
1694		nstat->tx_bytes   = hwstat->tx_octets;
1695		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1696				     hwstat->tx_oversize_pkts +
1697				     hwstat->tx_underruns +
1698				     hwstat->tx_excessive_cols +
1699				     hwstat->tx_late_cols);
1700		nstat->multicast  = hwstat->tx_multicast_pkts;
1701		nstat->collisions = hwstat->tx_total_cols;
1702
1703		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1704					   hwstat->rx_undersize);
1705		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1706		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1707		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1708		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1709					   hwstat->rx_oversize_pkts +
1710					   hwstat->rx_missed_pkts +
1711					   hwstat->rx_crc_align_errs +
1712					   hwstat->rx_undersize +
1713					   hwstat->rx_crc_errs +
1714					   hwstat->rx_align_errs +
1715					   hwstat->rx_symbol_errs);
1716
1717		nstat->tx_aborted_errors = hwstat->tx_underruns;
1718#if 0
1719		/* Carrier lost counter seems to be broken for some devices */
1720		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1721#endif
1722	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
1723
1724	return nstat;
1725}
1726
1727static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1728{
1729	struct netdev_hw_addr *ha;
1730	int i, num_ents;
1731
1732	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1733	i = 0;
1734	netdev_for_each_mc_addr(ha, dev) {
1735		if (i == num_ents)
1736			break;
1737		__b44_cam_write(bp, ha->addr, i++ + 1);
1738	}
1739	return i+1;
1740}
1741
1742static void __b44_set_rx_mode(struct net_device *dev)
1743{
1744	struct b44 *bp = netdev_priv(dev);
1745	u32 val;
1746
1747	val = br32(bp, B44_RXCONFIG);
1748	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1749	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1750		val |= RXCONFIG_PROMISC;
1751		bw32(bp, B44_RXCONFIG, val);
1752	} else {
1753		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1754		int i = 1;
1755
1756		__b44_set_mac_addr(bp);
1757
1758		if ((dev->flags & IFF_ALLMULTI) ||
1759		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1760			val |= RXCONFIG_ALLMULTI;
1761		else
1762			i = __b44_load_mcast(bp, dev);
1763
1764		for (; i < 64; i++)
1765			__b44_cam_write(bp, zero, i);
1766
1767		bw32(bp, B44_RXCONFIG, val);
1768        	val = br32(bp, B44_CAM_CTRL);
1769	        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1770	}
1771}
1772
1773static void b44_set_rx_mode(struct net_device *dev)
1774{
1775	struct b44 *bp = netdev_priv(dev);
1776
1777	spin_lock_irq(&bp->lock);
1778	__b44_set_rx_mode(dev);
1779	spin_unlock_irq(&bp->lock);
1780}
1781
1782static u32 b44_get_msglevel(struct net_device *dev)
1783{
1784	struct b44 *bp = netdev_priv(dev);
1785	return bp->msg_enable;
1786}
1787
1788static void b44_set_msglevel(struct net_device *dev, u32 value)
1789{
1790	struct b44 *bp = netdev_priv(dev);
1791	bp->msg_enable = value;
1792}
1793
1794static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1795{
1796	struct b44 *bp = netdev_priv(dev);
1797	struct ssb_bus *bus = bp->sdev->bus;
1798
1799	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1800	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1801	switch (bus->bustype) {
1802	case SSB_BUSTYPE_PCI:
1803		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1804		break;
1805	case SSB_BUSTYPE_SSB:
1806		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1807		break;
1808	case SSB_BUSTYPE_PCMCIA:
1809	case SSB_BUSTYPE_SDIO:
1810		WARN_ON(1); /* A device with this bus does not exist. */
1811		break;
1812	}
1813}
1814
1815static int b44_nway_reset(struct net_device *dev)
1816{
1817	struct b44 *bp = netdev_priv(dev);
1818	u32 bmcr;
1819	int r;
1820
1821	spin_lock_irq(&bp->lock);
1822	b44_readphy(bp, MII_BMCR, &bmcr);
1823	b44_readphy(bp, MII_BMCR, &bmcr);
1824	r = -EINVAL;
1825	if (bmcr & BMCR_ANENABLE) {
1826		b44_writephy(bp, MII_BMCR,
1827			     bmcr | BMCR_ANRESTART);
1828		r = 0;
1829	}
1830	spin_unlock_irq(&bp->lock);
1831
1832	return r;
1833}
1834
1835static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1836{
1837	struct b44 *bp = netdev_priv(dev);
1838
1839	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1840		BUG_ON(!bp->phydev);
1841		return phy_ethtool_gset(bp->phydev, cmd);
1842	}
1843
1844	cmd->supported = (SUPPORTED_Autoneg);
1845	cmd->supported |= (SUPPORTED_100baseT_Half |
1846			  SUPPORTED_100baseT_Full |
1847			  SUPPORTED_10baseT_Half |
1848			  SUPPORTED_10baseT_Full |
1849			  SUPPORTED_MII);
1850
1851	cmd->advertising = 0;
1852	if (bp->flags & B44_FLAG_ADV_10HALF)
1853		cmd->advertising |= ADVERTISED_10baseT_Half;
1854	if (bp->flags & B44_FLAG_ADV_10FULL)
1855		cmd->advertising |= ADVERTISED_10baseT_Full;
1856	if (bp->flags & B44_FLAG_ADV_100HALF)
1857		cmd->advertising |= ADVERTISED_100baseT_Half;
1858	if (bp->flags & B44_FLAG_ADV_100FULL)
1859		cmd->advertising |= ADVERTISED_100baseT_Full;
1860	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1861	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1862				    SPEED_100 : SPEED_10));
1863	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1864		DUPLEX_FULL : DUPLEX_HALF;
1865	cmd->port = 0;
1866	cmd->phy_address = bp->phy_addr;
1867	cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
1868		XCVR_EXTERNAL : XCVR_INTERNAL;
1869	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1870		AUTONEG_DISABLE : AUTONEG_ENABLE;
1871	if (cmd->autoneg == AUTONEG_ENABLE)
1872		cmd->advertising |= ADVERTISED_Autoneg;
1873	if (!netif_running(dev)){
1874		ethtool_cmd_speed_set(cmd, 0);
1875		cmd->duplex = 0xff;
1876	}
1877	cmd->maxtxpkt = 0;
1878	cmd->maxrxpkt = 0;
1879	return 0;
1880}
1881
1882static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1883{
1884	struct b44 *bp = netdev_priv(dev);
1885	u32 speed;
1886	int ret;
1887
1888	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1889		BUG_ON(!bp->phydev);
1890		spin_lock_irq(&bp->lock);
1891		if (netif_running(dev))
1892			b44_setup_phy(bp);
1893
1894		ret = phy_ethtool_sset(bp->phydev, cmd);
1895
1896		spin_unlock_irq(&bp->lock);
1897
1898		return ret;
1899	}
1900
1901	speed = ethtool_cmd_speed(cmd);
1902
1903	/* We do not support gigabit. */
1904	if (cmd->autoneg == AUTONEG_ENABLE) {
1905		if (cmd->advertising &
1906		    (ADVERTISED_1000baseT_Half |
1907		     ADVERTISED_1000baseT_Full))
1908			return -EINVAL;
1909	} else if ((speed != SPEED_100 &&
1910		    speed != SPEED_10) ||
1911		   (cmd->duplex != DUPLEX_HALF &&
1912		    cmd->duplex != DUPLEX_FULL)) {
1913			return -EINVAL;
1914	}
1915
1916	spin_lock_irq(&bp->lock);
1917
1918	if (cmd->autoneg == AUTONEG_ENABLE) {
1919		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1920			       B44_FLAG_100_BASE_T |
1921			       B44_FLAG_FULL_DUPLEX |
1922			       B44_FLAG_ADV_10HALF |
1923			       B44_FLAG_ADV_10FULL |
1924			       B44_FLAG_ADV_100HALF |
1925			       B44_FLAG_ADV_100FULL);
1926		if (cmd->advertising == 0) {
1927			bp->flags |= (B44_FLAG_ADV_10HALF |
1928				      B44_FLAG_ADV_10FULL |
1929				      B44_FLAG_ADV_100HALF |
1930				      B44_FLAG_ADV_100FULL);
1931		} else {
1932			if (cmd->advertising & ADVERTISED_10baseT_Half)
1933				bp->flags |= B44_FLAG_ADV_10HALF;
1934			if (cmd->advertising & ADVERTISED_10baseT_Full)
1935				bp->flags |= B44_FLAG_ADV_10FULL;
1936			if (cmd->advertising & ADVERTISED_100baseT_Half)
1937				bp->flags |= B44_FLAG_ADV_100HALF;
1938			if (cmd->advertising & ADVERTISED_100baseT_Full)
1939				bp->flags |= B44_FLAG_ADV_100FULL;
1940		}
1941	} else {
1942		bp->flags |= B44_FLAG_FORCE_LINK;
1943		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1944		if (speed == SPEED_100)
1945			bp->flags |= B44_FLAG_100_BASE_T;
1946		if (cmd->duplex == DUPLEX_FULL)
1947			bp->flags |= B44_FLAG_FULL_DUPLEX;
1948	}
1949
1950	if (netif_running(dev))
1951		b44_setup_phy(bp);
1952
1953	spin_unlock_irq(&bp->lock);
1954
1955	return 0;
1956}
1957
1958static void b44_get_ringparam(struct net_device *dev,
1959			      struct ethtool_ringparam *ering)
1960{
1961	struct b44 *bp = netdev_priv(dev);
1962
1963	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1964	ering->rx_pending = bp->rx_pending;
1965
1966	/* XXX ethtool lacks a tx_max_pending, oops... */
1967}
1968
1969static int b44_set_ringparam(struct net_device *dev,
1970			     struct ethtool_ringparam *ering)
1971{
1972	struct b44 *bp = netdev_priv(dev);
1973
1974	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1975	    (ering->rx_mini_pending != 0) ||
1976	    (ering->rx_jumbo_pending != 0) ||
1977	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1978		return -EINVAL;
1979
1980	spin_lock_irq(&bp->lock);
1981
1982	bp->rx_pending = ering->rx_pending;
1983	bp->tx_pending = ering->tx_pending;
1984
1985	b44_halt(bp);
1986	b44_init_rings(bp);
1987	b44_init_hw(bp, B44_FULL_RESET);
1988	netif_wake_queue(bp->dev);
1989	spin_unlock_irq(&bp->lock);
1990
1991	b44_enable_ints(bp);
1992
1993	return 0;
1994}
1995
1996static void b44_get_pauseparam(struct net_device *dev,
1997				struct ethtool_pauseparam *epause)
1998{
1999	struct b44 *bp = netdev_priv(dev);
2000
2001	epause->autoneg =
2002		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2003	epause->rx_pause =
2004		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
2005	epause->tx_pause =
2006		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
2007}
2008
2009static int b44_set_pauseparam(struct net_device *dev,
2010				struct ethtool_pauseparam *epause)
2011{
2012	struct b44 *bp = netdev_priv(dev);
2013
2014	spin_lock_irq(&bp->lock);
2015	if (epause->autoneg)
2016		bp->flags |= B44_FLAG_PAUSE_AUTO;
2017	else
2018		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2019	if (epause->rx_pause)
2020		bp->flags |= B44_FLAG_RX_PAUSE;
2021	else
2022		bp->flags &= ~B44_FLAG_RX_PAUSE;
2023	if (epause->tx_pause)
2024		bp->flags |= B44_FLAG_TX_PAUSE;
2025	else
2026		bp->flags &= ~B44_FLAG_TX_PAUSE;
2027	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2028		b44_halt(bp);
2029		b44_init_rings(bp);
2030		b44_init_hw(bp, B44_FULL_RESET);
2031	} else {
2032		__b44_set_flow_ctrl(bp, bp->flags);
2033	}
2034	spin_unlock_irq(&bp->lock);
2035
2036	b44_enable_ints(bp);
2037
2038	return 0;
2039}
2040
2041static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2042{
2043	switch(stringset) {
2044	case ETH_SS_STATS:
2045		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2046		break;
2047	}
2048}
2049
2050static int b44_get_sset_count(struct net_device *dev, int sset)
2051{
2052	switch (sset) {
2053	case ETH_SS_STATS:
2054		return ARRAY_SIZE(b44_gstrings);
2055	default:
2056		return -EOPNOTSUPP;
2057	}
2058}
2059
2060static void b44_get_ethtool_stats(struct net_device *dev,
2061				  struct ethtool_stats *stats, u64 *data)
2062{
2063	struct b44 *bp = netdev_priv(dev);
2064	struct b44_hw_stats *hwstat = &bp->hw_stats;
2065	u64 *data_src, *data_dst;
2066	unsigned int start;
2067	u32 i;
2068
2069	spin_lock_irq(&bp->lock);
2070	b44_stats_update(bp);
2071	spin_unlock_irq(&bp->lock);
2072
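	/* Editor's note: the copy loop below assumes the u64 counters in
	 * struct b44_hw_stats are declared in the same order as b44_gstrings;
	 * the u64_stats seqcount makes the snapshot retry if b44_stats_update()
	 * ran concurrently from the timer while we were copying.
	 */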
2073	do {
2074		data_src = &hwstat->tx_good_octets;
2075		data_dst = data;
2076		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
2077
2078		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2079			*data_dst++ = *data_src++;
2080
2081	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
2082}
2083
2084static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2085{
2086	struct b44 *bp = netdev_priv(dev);
2087
2088	wol->supported = WAKE_MAGIC;
2089	if (bp->flags & B44_FLAG_WOL_ENABLE)
2090		wol->wolopts = WAKE_MAGIC;
2091	else
2092		wol->wolopts = 0;
2093	memset(&wol->sopass, 0, sizeof(wol->sopass));
2094}
2095
2096static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2097{
2098	struct b44 *bp = netdev_priv(dev);
2099
2100	spin_lock_irq(&bp->lock);
2101	if (wol->wolopts & WAKE_MAGIC)
2102		bp->flags |= B44_FLAG_WOL_ENABLE;
2103	else
2104		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2105	spin_unlock_irq(&bp->lock);
2106
2107	return 0;
2108}
2109
2110static const struct ethtool_ops b44_ethtool_ops = {
2111	.get_drvinfo		= b44_get_drvinfo,
2112	.get_settings		= b44_get_settings,
2113	.set_settings		= b44_set_settings,
2114	.nway_reset		= b44_nway_reset,
2115	.get_link		= ethtool_op_get_link,
2116	.get_wol		= b44_get_wol,
2117	.set_wol		= b44_set_wol,
2118	.get_ringparam		= b44_get_ringparam,
2119	.set_ringparam		= b44_set_ringparam,
2120	.get_pauseparam		= b44_get_pauseparam,
2121	.set_pauseparam		= b44_set_pauseparam,
2122	.get_msglevel		= b44_get_msglevel,
2123	.set_msglevel		= b44_set_msglevel,
2124	.get_strings		= b44_get_strings,
2125	.get_sset_count		= b44_get_sset_count,
2126	.get_ethtool_stats	= b44_get_ethtool_stats,
2127};
2128
2129static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2130{
2131	struct b44 *bp = netdev_priv(dev);
2132	int err = -EINVAL;
2133
2134	if (!netif_running(dev))
2135		goto out;
2136
2137	spin_lock_irq(&bp->lock);
2138	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2139		BUG_ON(!bp->phydev);
2140		err = phy_mii_ioctl(bp->phydev, ifr, cmd);
2141	} else {
2142		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2143	}
2144	spin_unlock_irq(&bp->lock);
2145out:
2146	return err;
2147}
2148
2149static int b44_get_invariants(struct b44 *bp)
2150{
2151	struct ssb_device *sdev = bp->sdev;
2152	int err = 0;
2153	u8 *addr;
2154
2155	bp->dma_offset = ssb_dma_translation(sdev);
2156
2157	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2158	    instance > 1) {
2159		addr = sdev->bus->sprom.et1mac;
2160		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2161	} else {
2162		addr = sdev->bus->sprom.et0mac;
2163		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2164	}
2165	/* Some ROMs have buggy PHY addresses with the high
2166	 * bits set (sign extension?). Truncate them to a
2167	 * valid PHY address. */
2168	bp->phy_addr &= 0x1F;
2169
2170	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2171
2172	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2173		pr_err("Invalid MAC address found in EEPROM\n");
2174		return -EINVAL;
2175	}
2176
2177	bp->imask = IMASK_DEF;
2178
2179	/* XXX - really required?
2180	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
2181	*/
2182
2183	if (bp->sdev->id.revision >= 7)
2184		bp->flags |= B44_FLAG_B0_ANDLATER;
2185
2186	return err;
2187}
2188
2189static const struct net_device_ops b44_netdev_ops = {
2190	.ndo_open		= b44_open,
2191	.ndo_stop		= b44_close,
2192	.ndo_start_xmit		= b44_start_xmit,
2193	.ndo_get_stats64	= b44_get_stats64,
2194	.ndo_set_rx_mode	= b44_set_rx_mode,
2195	.ndo_set_mac_address	= b44_set_mac_addr,
2196	.ndo_validate_addr	= eth_validate_addr,
2197	.ndo_do_ioctl		= b44_ioctl,
2198	.ndo_tx_timeout		= b44_tx_timeout,
2199	.ndo_change_mtu		= b44_change_mtu,
2200#ifdef CONFIG_NET_POLL_CONTROLLER
2201	.ndo_poll_controller	= b44_poll_controller,
2202#endif
2203};
2204
2205static void b44_adjust_link(struct net_device *dev)
2206{
2207	struct b44 *bp = netdev_priv(dev);
2208	struct phy_device *phydev = bp->phydev;
2209	bool status_changed = 0;
2210
2211	BUG_ON(!phydev);
2212
2213	if (bp->old_link != phydev->link) {
2214		status_changed = 1;
2215		bp->old_link = phydev->link;
2216	}
2217
2218	/* reflect duplex change */
2219	if (phydev->link) {
2220		if ((phydev->duplex == DUPLEX_HALF) &&
2221		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2222			status_changed = 1;
2223			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2224		} else if ((phydev->duplex == DUPLEX_FULL) &&
2225			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2226			status_changed = 1;
2227			bp->flags |= B44_FLAG_FULL_DUPLEX;
2228		}
2229	}
2230
2231	if (status_changed) {
2232		u32 val = br32(bp, B44_TX_CTRL);
2233		if (bp->flags & B44_FLAG_FULL_DUPLEX)
2234			val |= TX_CTRL_DUPLEX;
2235		else
2236			val &= ~TX_CTRL_DUPLEX;
2237		bw32(bp, B44_TX_CTRL, val);
2238		phy_print_status(phydev);
2239	}
2240}
2241
2242static int b44_register_phy_one(struct b44 *bp)
2243{
2244	struct mii_bus *mii_bus;
2245	struct ssb_device *sdev = bp->sdev;
2246	struct phy_device *phydev;
2247	char bus_id[MII_BUS_ID_SIZE + 3];
2248	struct ssb_sprom *sprom = &sdev->bus->sprom;
2249	int err;
2250
2251	mii_bus = mdiobus_alloc();
2252	if (!mii_bus) {
2253		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
2254		err = -ENOMEM;
2255		goto err_out;
2256	}
2257
2258	mii_bus->priv = bp;
2259	mii_bus->read = b44_mdio_read_phylib;
2260	mii_bus->write = b44_mdio_write_phylib;
2261	mii_bus->name = "b44_eth_mii";
2262	mii_bus->parent = sdev->dev;
2263	mii_bus->phy_mask = ~(1 << bp->phy_addr);
2264	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
2265	mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
2266	if (!mii_bus->irq) {
2267		dev_err(sdev->dev, "mii_bus irq allocation failed\n");
2268		err = -ENOMEM;
2269		goto err_out_mdiobus;
2270	}
2271
2272	memset(mii_bus->irq, PHY_POLL, sizeof(int) * PHY_MAX_ADDR);
2273
2274	bp->mii_bus = mii_bus;
2275
2276	err = mdiobus_register(mii_bus);
2277	if (err) {
2278		dev_err(sdev->dev, "failed to register MII bus\n");
2279		goto err_out_mdiobus_irq;
2280	}
2281
2282	if (!bp->mii_bus->phy_map[bp->phy_addr] &&
2283	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2284
2285		dev_info(sdev->dev,
2286			 "could not find PHY at %i, use fixed one\n",
2287			 bp->phy_addr);
2288
2289		bp->phy_addr = 0;
2290		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2291			 bp->phy_addr);
2292	} else {
2293		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2294			 bp->phy_addr);
2295	}
2296
2297	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2298			     PHY_INTERFACE_MODE_MII);
2299	if (IS_ERR(phydev)) {
2300		dev_err(sdev->dev, "could not attach PHY at %i\n",
2301			bp->phy_addr);
2302		err = PTR_ERR(phydev);
2303		goto err_out_mdiobus_unregister;
2304	}
2305
2306	/* mask with MAC supported features */
2307	phydev->supported &= (SUPPORTED_100baseT_Half |
2308			      SUPPORTED_100baseT_Full |
2309			      SUPPORTED_Autoneg |
2310			      SUPPORTED_MII);
2311	phydev->advertising = phydev->supported;
2312
2313	bp->phydev = phydev;
2314	bp->old_link = 0;
2315	bp->phy_addr = phydev->addr;
2316
2317	dev_info(sdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
2318		 phydev->drv->name, dev_name(&phydev->dev));
2319
2320	return 0;
2321
2322err_out_mdiobus_unregister:
2323	mdiobus_unregister(mii_bus);
2324
2325err_out_mdiobus_irq:
2326	kfree(mii_bus->irq);
2327
2328err_out_mdiobus:
2329	mdiobus_free(mii_bus);
2330
2331err_out:
2332	return err;
2333}
2334
2335static void b44_unregister_phy_one(struct b44 *bp)
2336{
2337	struct mii_bus *mii_bus = bp->mii_bus;
2338
2339	phy_disconnect(bp->phydev);
2340	mdiobus_unregister(mii_bus);
2341	kfree(mii_bus->irq);
2342	mdiobus_free(mii_bus);
2343}
2344
2345static int b44_init_one(struct ssb_device *sdev,
2346			const struct ssb_device_id *ent)
2347{
2348	struct net_device *dev;
2349	struct b44 *bp;
2350	int err;
2351
2352	instance++;
2353
2354	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2355
2356	dev = alloc_etherdev(sizeof(*bp));
2357	if (!dev) {
2358		err = -ENOMEM;
2359		goto out;
2360	}
2361
2362	SET_NETDEV_DEV(dev, sdev->dev);
2363
2364	/* No interesting netdevice features in this card... */
2365	dev->features |= 0;
2366
2367	bp = netdev_priv(dev);
2368	bp->sdev = sdev;
2369	bp->dev = dev;
2370	bp->force_copybreak = 0;
2371
2372	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2373
2374	spin_lock_init(&bp->lock);
2375
2376	bp->rx_pending = B44_DEF_RX_RING_PENDING;
2377	bp->tx_pending = B44_DEF_TX_RING_PENDING;
2378
2379	dev->netdev_ops = &b44_netdev_ops;
2380	netif_napi_add(dev, &bp->napi, b44_poll, 64);
2381	dev->watchdog_timeo = B44_TX_TIMEOUT;
2382	dev->irq = sdev->irq;
2383	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2384
2385	err = ssb_bus_powerup(sdev->bus, 0);
2386	if (err) {
2387		dev_err(sdev->dev,
2388			"Failed to powerup the bus\n");
2389		goto err_out_free_dev;
2390	}
2391
2392	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2393		dev_err(sdev->dev,
2394			"Required 30BIT DMA mask unsupported by the system\n");
2395		goto err_out_powerdown;
2396	}
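	/* Editor's note: the 30-bit mask matches the hardware limitation noted
	 * in b44_alloc_rx_skb() and b44_start_xmit(): the core can only DMA
	 * to/from the first 1GB of physical memory, hence the bounce-buffer
	 * workarounds there.
	 */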
2397
2398	err = b44_get_invariants(bp);
2399	if (err) {
2400		dev_err(sdev->dev,
2401			"Problem fetching invariants of chip, aborting\n");
2402		goto err_out_powerdown;
2403	}
2404
2405	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2406		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2407		err = -ENODEV;
2408		goto err_out_powerdown;
2409	}
2410
2411	bp->mii_if.dev = dev;
2412	bp->mii_if.mdio_read = b44_mdio_read_mii;
2413	bp->mii_if.mdio_write = b44_mdio_write_mii;
2414	bp->mii_if.phy_id = bp->phy_addr;
2415	bp->mii_if.phy_id_mask = 0x1f;
2416	bp->mii_if.reg_num_mask = 0x1f;
2417
2418	/* By default, advertise all speed/duplex settings. */
2419	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2420		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2421
2422	/* By default, auto-negotiate PAUSE. */
2423	bp->flags |= B44_FLAG_PAUSE_AUTO;
2424
2425	err = register_netdev(dev);
2426	if (err) {
2427		dev_err(sdev->dev, "Cannot register net device, aborting\n");
2428		goto err_out_powerdown;
2429	}
2430
2431	netif_carrier_off(dev);
2432
2433	ssb_set_drvdata(sdev, dev);
2434
2435	/* Chip reset provides power to the b44 MAC & PCI cores, which
2436	 * is necessary for MAC register access.
2437	 */
2438	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2439
2440	/* do a phy reset to test if there is an active phy */
2441	err = b44_phy_reset(bp);
2442	if (err < 0) {
2443		dev_err(sdev->dev, "phy reset failed\n");
2444		goto err_out_unregister_netdev;
2445	}
2446
2447	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2448		err = b44_register_phy_one(bp);
2449		if (err) {
2450			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2451			goto err_out_unregister_netdev;
2452		}
2453	}
2454
2455	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2456
2457	return 0;
2458
2459err_out_unregister_netdev:
2460	unregister_netdev(dev);
2461err_out_powerdown:
2462	ssb_bus_may_powerdown(sdev->bus);
2463
2464err_out_free_dev:
2465	free_netdev(dev);
2466
2467out:
2468	return err;
2469}
2470
2471static void b44_remove_one(struct ssb_device *sdev)
2472{
2473	struct net_device *dev = ssb_get_drvdata(sdev);
2474	struct b44 *bp = netdev_priv(dev);
2475
2476	unregister_netdev(dev);
2477	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2478		b44_unregister_phy_one(bp);
2479	ssb_device_disable(sdev, 0);
2480	ssb_bus_may_powerdown(sdev->bus);
2481	free_netdev(dev);
2482	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2483	ssb_set_drvdata(sdev, NULL);
2484}
2485
2486static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2487{
2488	struct net_device *dev = ssb_get_drvdata(sdev);
2489	struct b44 *bp = netdev_priv(dev);
2490
2491	if (!netif_running(dev))
2492		return 0;
2493
2494	del_timer_sync(&bp->timer);
2495
2496	spin_lock_irq(&bp->lock);
2497
2498	b44_halt(bp);
2499	netif_carrier_off(bp->dev);
2500	netif_device_detach(bp->dev);
2501	b44_free_rings(bp);
2502
2503	spin_unlock_irq(&bp->lock);
2504
2505	free_irq(dev->irq, dev);
2506	if (bp->flags & B44_FLAG_WOL_ENABLE) {
2507		b44_init_hw(bp, B44_PARTIAL_RESET);
2508		b44_setup_wol(bp);
2509	}
2510
2511	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2512	return 0;
2513}
2514
2515static int b44_resume(struct ssb_device *sdev)
2516{
2517	struct net_device *dev = ssb_get_drvdata(sdev);
2518	struct b44 *bp = netdev_priv(dev);
2519	int rc = 0;
2520
2521	rc = ssb_bus_powerup(sdev->bus, 0);
2522	if (rc) {
2523		dev_err(sdev->dev,
2524			"Failed to powerup the bus\n");
2525		return rc;
2526	}
2527
2528	if (!netif_running(dev))
2529		return 0;
2530
2531	spin_lock_irq(&bp->lock);
2532	b44_init_rings(bp);
2533	b44_init_hw(bp, B44_FULL_RESET);
2534	spin_unlock_irq(&bp->lock);
2535
2536	/*
2537	 * As a shared interrupt, the handler can be called immediately. To be
2538	 * able to check the interrupt status the hardware must already be
2539	 * powered back on (b44_init_hw).
2540	 */
2541	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2542	if (rc) {
2543		netdev_err(dev, "request_irq failed\n");
2544		spin_lock_irq(&bp->lock);
2545		b44_halt(bp);
2546		b44_free_rings(bp);
2547		spin_unlock_irq(&bp->lock);
2548		return rc;
2549	}
2550
2551	netif_device_attach(bp->dev);
2552
2553	b44_enable_ints(bp);
2554	netif_wake_queue(dev);
2555
2556	mod_timer(&bp->timer, jiffies + 1);
2557
2558	return 0;
2559}
2560
2561static struct ssb_driver b44_ssb_driver = {
2562	.name		= DRV_MODULE_NAME,
2563	.id_table	= b44_ssb_tbl,
2564	.probe		= b44_init_one,
2565	.remove		= b44_remove_one,
2566	.suspend	= b44_suspend,
2567	.resume		= b44_resume,
2568};
2569
2570static inline int __init b44_pci_init(void)
2571{
2572	int err = 0;
2573#ifdef CONFIG_B44_PCI
2574	err = ssb_pcihost_register(&b44_pci_driver);
2575#endif
2576	return err;
2577}
2578
2579static inline void b44_pci_exit(void)
2580{
2581#ifdef CONFIG_B44_PCI
2582	ssb_pcihost_unregister(&b44_pci_driver);
2583#endif
2584}
2585
2586static int __init b44_init(void)
2587{
2588	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2589	int err;
2590
2591	/* Set up parameters for syncing RX/TX DMA descriptors */
2592	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
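	/* Editor's note: rounding the sync size up to the cache alignment is
	 * presumably so the streaming-DMA syncs used for the *_RING_HACK
	 * fallback rings always cover whole cache lines.
	 */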
2593
2594	err = b44_pci_init();
2595	if (err)
2596		return err;
2597	err = ssb_driver_register(&b44_ssb_driver);
2598	if (err)
2599		b44_pci_exit();
2600	return err;
2601}
2602
2603static void __exit b44_cleanup(void)
2604{
2605	ssb_driver_unregister(&b44_ssb_driver);
2606	b44_pci_exit();
2607}
2608
2609module_init(b44_init);
2610module_exit(b44_cleanup);
2611
v5.9
   1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
   2 *
   3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
   4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
   5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
   6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
   7 * Copyright (C) 2006 Broadcom Corporation.
   8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
   9 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
  10 *
  11 * Distribute under GPL.
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/moduleparam.h>
  19#include <linux/types.h>
  20#include <linux/netdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/mii.h>
  23#include <linux/if_ether.h>
  24#include <linux/if_vlan.h>
  25#include <linux/etherdevice.h>
  26#include <linux/pci.h>
  27#include <linux/delay.h>
  28#include <linux/init.h>
  29#include <linux/interrupt.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/ssb/ssb.h>
  32#include <linux/slab.h>
  33#include <linux/phy.h>
  34
  35#include <linux/uaccess.h>
  36#include <asm/io.h>
  37#include <asm/irq.h>
  38
  39
  40#include "b44.h"
  41
  42#define DRV_MODULE_NAME		"b44"
  43#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
  44
  45#define B44_DEF_MSG_ENABLE	  \
  46	(NETIF_MSG_DRV		| \
  47	 NETIF_MSG_PROBE	| \
  48	 NETIF_MSG_LINK		| \
  49	 NETIF_MSG_TIMER	| \
  50	 NETIF_MSG_IFDOWN	| \
  51	 NETIF_MSG_IFUP		| \
  52	 NETIF_MSG_RX_ERR	| \
  53	 NETIF_MSG_TX_ERR)
  54
  55/* length of time before we decide the hardware is borked,
  56 * and dev->tx_timeout() should be called to fix the problem
  57 */
  58#define B44_TX_TIMEOUT			(5 * HZ)
  59
  60/* hardware minimum and maximum for a single frame's data payload */
  61#define B44_MIN_MTU			ETH_ZLEN
  62#define B44_MAX_MTU			ETH_DATA_LEN
  63
  64#define B44_RX_RING_SIZE		512
  65#define B44_DEF_RX_RING_PENDING		200
  66#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
  67				 B44_RX_RING_SIZE)
  68#define B44_TX_RING_SIZE		512
  69#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
  70#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
  71				 B44_TX_RING_SIZE)
  72
  73#define TX_RING_GAP(BP)	\
  74	(B44_TX_RING_SIZE - (BP)->tx_pending)
  75#define TX_BUFFS_AVAIL(BP)						\
  76	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
  77	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
  78	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
  79#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
  80
  81#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
  82#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
  83
  84/* minimum number of free TX descriptors required to wake up TX process */
  85#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
  86
  87/* b44 internal pattern match filter info */
  88#define B44_PATTERN_BASE	0x400
  89#define B44_PATTERN_SIZE	0x80
  90#define B44_PMASK_BASE		0x600
  91#define B44_PMASK_SIZE		0x10
  92#define B44_MAX_PATTERNS	16
  93#define B44_ETHIPV6UDP_HLEN	62
  94#define B44_ETHIPV4UDP_HLEN	42
  95
  96MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
  97MODULE_DESCRIPTION(DRV_DESCRIPTION);
  98MODULE_LICENSE("GPL");
  99
 100static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
 101module_param(b44_debug, int, 0);
 102MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
 103
 104
 105#ifdef CONFIG_B44_PCI
 106static const struct pci_device_id b44_pci_tbl[] = {
 107	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
 108	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
 109	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
 110	{ 0 } /* terminate list with empty entry */
 111};
 112MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
 113
 114static struct pci_driver b44_pci_driver = {
 115	.name		= DRV_MODULE_NAME,
 116	.id_table	= b44_pci_tbl,
 117};
 118#endif /* CONFIG_B44_PCI */
 119
 120static const struct ssb_device_id b44_ssb_tbl[] = {
 121	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
 122	{},
 123};
 124MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
 125
 126static void b44_halt(struct b44 *);
 127static void b44_init_rings(struct b44 *);
 128
 129#define B44_FULL_RESET		1
 130#define B44_FULL_RESET_SKIP_PHY	2
 131#define B44_PARTIAL_RESET	3
 132#define B44_CHIP_RESET_FULL	4
 133#define B44_CHIP_RESET_PARTIAL	5
 134
 135static void b44_init_hw(struct b44 *, int);
 136
 137static int dma_desc_sync_size;
 138static int instance;
 139
 140static const char b44_gstrings[][ETH_GSTRING_LEN] = {
 141#define _B44(x...)	# x,
 142B44_STAT_REG_DECLARE
 143#undef _B44
 144};
 145
 146static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 147						dma_addr_t dma_base,
 148						unsigned long offset,
 149						enum dma_data_direction dir)
 150{
 151	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
 152				   dma_desc_sync_size, dir);
 153}
 154
 155static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 156					     dma_addr_t dma_base,
 157					     unsigned long offset,
 158					     enum dma_data_direction dir)
 159{
 160	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
 161				dma_desc_sync_size, dir);
 162}
 163
 164static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
 165{
 166	return ssb_read32(bp->sdev, reg);
 167}
 168
 169static inline void bw32(const struct b44 *bp,
 170			unsigned long reg, unsigned long val)
 171{
 172	ssb_write32(bp->sdev, reg, val);
 173}
 174
 175static int b44_wait_bit(struct b44 *bp, unsigned long reg,
 176			u32 bit, unsigned long timeout, const int clear)
 177{
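	/* Editor's note: `timeout` is an iteration count rather than a time;
	 * with the udelay(10) below, each unit is roughly 10us of polling.
	 */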
 178	unsigned long i;
 179
 180	for (i = 0; i < timeout; i++) {
 181		u32 val = br32(bp, reg);
 182
 183		if (clear && !(val & bit))
 184			break;
 185		if (!clear && (val & bit))
 186			break;
 187		udelay(10);
 188	}
 189	if (i == timeout) {
 190		if (net_ratelimit())
 191			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
 192				   bit, reg, clear ? "clear" : "set");
 193
 194		return -ENODEV;
 195	}
 196	return 0;
 197}
 198
 199static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
 200{
 201	u32 val;
 202
 203	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
 204			    (index << CAM_CTRL_INDEX_SHIFT)));
 205
 206	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 207
 208	val = br32(bp, B44_CAM_DATA_LO);
 209
 210	data[2] = (val >> 24) & 0xFF;
 211	data[3] = (val >> 16) & 0xFF;
 212	data[4] = (val >> 8) & 0xFF;
 213	data[5] = (val >> 0) & 0xFF;
 214
 215	val = br32(bp, B44_CAM_DATA_HI);
 216
 217	data[0] = (val >> 8) & 0xFF;
 218	data[1] = (val >> 0) & 0xFF;
 219}
 220
 221static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
 222{
 223	u32 val;
 224
 225	val  = ((u32) data[2]) << 24;
 226	val |= ((u32) data[3]) << 16;
 227	val |= ((u32) data[4]) <<  8;
 228	val |= ((u32) data[5]) <<  0;
 229	bw32(bp, B44_CAM_DATA_LO, val);
 230	val = (CAM_DATA_HI_VALID |
 231	       (((u32) data[0]) << 8) |
 232	       (((u32) data[1]) << 0));
 233	bw32(bp, B44_CAM_DATA_HI, val);
 234	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
 235			    (index << CAM_CTRL_INDEX_SHIFT)));
 236	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
 237}
 238
 239static inline void __b44_disable_ints(struct b44 *bp)
 240{
 241	bw32(bp, B44_IMASK, 0);
 242}
 243
 244static void b44_disable_ints(struct b44 *bp)
 245{
 246	__b44_disable_ints(bp);
 247
 248	/* Flush posted writes. */
 249	br32(bp, B44_IMASK);
 250}
 251
 252static void b44_enable_ints(struct b44 *bp)
 253{
 254	bw32(bp, B44_IMASK, bp->imask);
 255}
 256
 257static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
 258{
 259	int err;
 260
 261	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
 262	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 263			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
 264			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
 265			     (reg << MDIO_DATA_RA_SHIFT) |
 266			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
 267	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
 268	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
 269
 270	return err;
 271}
 272
 273static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
 274{
 275	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
 276	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
 277			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
 278			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
 279			     (reg << MDIO_DATA_RA_SHIFT) |
 280			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
 281			     (val & MDIO_DATA_DATA)));
 282	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
 283}
 284
 285static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
 286{
 287	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 288		return 0;
 289
 290	return __b44_readphy(bp, bp->phy_addr, reg, val);
 291}
 292
 293static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
 294{
 295	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 296		return 0;
 297
 298	return __b44_writephy(bp, bp->phy_addr, reg, val);
 299}
 300
 301/* miilib interface */
 302static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
 303{
 304	u32 val;
 305	struct b44 *bp = netdev_priv(dev);
 306	int rc = __b44_readphy(bp, phy_id, location, &val);
 307	if (rc)
 308		return 0xffffffff;
 309	return val;
 310}
 311
 312static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
 313			       int val)
 314{
 315	struct b44 *bp = netdev_priv(dev);
 316	__b44_writephy(bp, phy_id, location, val);
 317}
 318
 319static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
 320{
 321	u32 val;
 322	struct b44 *bp = bus->priv;
 323	int rc = __b44_readphy(bp, phy_id, location, &val);
 324	if (rc)
 325		return 0xffffffff;
 326	return val;
 327}
 328
 329static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
 330				 u16 val)
 331{
 332	struct b44 *bp = bus->priv;
 333	return __b44_writephy(bp, phy_id, location, val);
 334}
 335
 336static int b44_phy_reset(struct b44 *bp)
 337{
 338	u32 val;
 339	int err;
 340
 341	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 342		return 0;
 343	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
 344	if (err)
 345		return err;
 346	udelay(100);
 347	err = b44_readphy(bp, MII_BMCR, &val);
 348	if (!err) {
 349		if (val & BMCR_RESET) {
 350			netdev_err(bp->dev, "PHY Reset would not complete\n");
 351			err = -ENODEV;
 352		}
 353	}
 354
 355	return err;
 356}
 357
 358static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
 359{
 360	u32 val;
 361
 362	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
 363	bp->flags |= pause_flags;
 364
 365	val = br32(bp, B44_RXCONFIG);
 366	if (pause_flags & B44_FLAG_RX_PAUSE)
 367		val |= RXCONFIG_FLOW;
 368	else
 369		val &= ~RXCONFIG_FLOW;
 370	bw32(bp, B44_RXCONFIG, val);
 371
 372	val = br32(bp, B44_MAC_FLOW);
 373	if (pause_flags & B44_FLAG_TX_PAUSE)
 374		val |= (MAC_FLOW_PAUSE_ENAB |
 375			(0xc0 & MAC_FLOW_RX_HI_WATER));
 376	else
 377		val &= ~MAC_FLOW_PAUSE_ENAB;
 378	bw32(bp, B44_MAC_FLOW, val);
 379}
 380
 381static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
 382{
 383	u32 pause_enab = 0;
 384
 385	/* The driver supports only rx pause by default because
 386	   the b44 mac tx pause mechanism generates excessive
 387	   pause frames.
 388	   Use ethtool to turn on b44 tx pause if necessary.
 389	 */
 390	if ((local & ADVERTISE_PAUSE_CAP) &&
 391	    (local & ADVERTISE_PAUSE_ASYM)) {
 392		if ((remote & LPA_PAUSE_ASYM) &&
 393		    !(remote & LPA_PAUSE_CAP))
 394			pause_enab |= B44_FLAG_RX_PAUSE;
 395	}
 396
 397	__b44_set_flow_ctrl(bp, pause_enab);
 398}
 399
 400#ifdef CONFIG_BCM47XX
 401#include <linux/bcm47xx_nvram.h>
 402static void b44_wap54g10_workaround(struct b44 *bp)
 403{
 404	char buf[20];
 405	u32 val;
 406	int err;
 407
 408	/*
 409	 * workaround for bad hardware design in Linksys WAP54G v1.0
 410	 * see https://dev.openwrt.org/ticket/146
 411	 * check and reset bit "isolate"
 412	 */
 413	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
 414		return;
 415	if (simple_strtoul(buf, NULL, 0) == 2) {
 416		err = __b44_readphy(bp, 0, MII_BMCR, &val);
 417		if (err)
 418			goto error;
 419		if (!(val & BMCR_ISOLATE))
 420			return;
 421		val &= ~BMCR_ISOLATE;
 422		err = __b44_writephy(bp, 0, MII_BMCR, val);
 423		if (err)
 424			goto error;
 425	}
 426	return;
 427error:
 428	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
 429}
 430#else
 431static inline void b44_wap54g10_workaround(struct b44 *bp)
 432{
 433}
 434#endif
 435
 436static int b44_setup_phy(struct b44 *bp)
 437{
 438	u32 val;
 439	int err;
 440
 441	b44_wap54g10_workaround(bp);
 442
 443	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 444		return 0;
 445	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
 446		goto out;
 447	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
 448				val & MII_ALEDCTRL_ALLMSK)) != 0)
 449		goto out;
 450	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
 451		goto out;
 452	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
 453				val | MII_TLEDCTRL_ENABLE)) != 0)
 454		goto out;
 455
 456	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
 457		u32 adv = ADVERTISE_CSMA;
 458
 459		if (bp->flags & B44_FLAG_ADV_10HALF)
 460			adv |= ADVERTISE_10HALF;
 461		if (bp->flags & B44_FLAG_ADV_10FULL)
 462			adv |= ADVERTISE_10FULL;
 463		if (bp->flags & B44_FLAG_ADV_100HALF)
 464			adv |= ADVERTISE_100HALF;
 465		if (bp->flags & B44_FLAG_ADV_100FULL)
 466			adv |= ADVERTISE_100FULL;
 467
 468		if (bp->flags & B44_FLAG_PAUSE_AUTO)
 469			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 470
 471		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
 472			goto out;
 473		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
 474						       BMCR_ANRESTART))) != 0)
 475			goto out;
 476	} else {
 477		u32 bmcr;
 478
 479		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
 480			goto out;
 481		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
 482		if (bp->flags & B44_FLAG_100_BASE_T)
 483			bmcr |= BMCR_SPEED100;
 484		if (bp->flags & B44_FLAG_FULL_DUPLEX)
 485			bmcr |= BMCR_FULLDPLX;
 486		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
 487			goto out;
 488
 489		/* Since we will not be negotiating there is no safe way
 490		 * to determine if the link partner supports flow control
 491		 * or not.  So just disable it completely in this case.
 492		 */
 493		b44_set_flow_ctrl(bp, 0, 0);
 494	}
 495
 496out:
 497	return err;
 498}
 499
 500static void b44_stats_update(struct b44 *bp)
 501{
 502	unsigned long reg;
 503	u64 *val;
 504
 505	val = &bp->hw_stats.tx_good_octets;
 506	u64_stats_update_begin(&bp->hw_stats.syncp);
 507
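	/* Editor's note: *val++ walks the fields of struct b44_hw_stats in
	 * declaration order, which is assumed to mirror the MIB register order
	 * B44_TX_GOOD_O..B44_TX_PAUSE followed by B44_RX_GOOD_O..B44_RX_NPAUSE.
	 */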
 508	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
 509		*val++ += br32(bp, reg);
 510	}
 511
 512	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
 513		*val++ += br32(bp, reg);
 514	}
 515
 516	u64_stats_update_end(&bp->hw_stats.syncp);
 517}
 518
 519static void b44_link_report(struct b44 *bp)
 520{
 521	if (!netif_carrier_ok(bp->dev)) {
 522		netdev_info(bp->dev, "Link is down\n");
 523	} else {
 524		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
 525			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
 526			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
 527
 528		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
 529			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
 530			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
 531	}
 532}
 533
 534static void b44_check_phy(struct b44 *bp)
 535{
 536	u32 bmsr, aux;
 537
 538	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
 539		bp->flags |= B44_FLAG_100_BASE_T;
 540		if (!netif_carrier_ok(bp->dev)) {
 541			u32 val = br32(bp, B44_TX_CTRL);
 542			if (bp->flags & B44_FLAG_FULL_DUPLEX)
 543				val |= TX_CTRL_DUPLEX;
 544			else
 545				val &= ~TX_CTRL_DUPLEX;
 546			bw32(bp, B44_TX_CTRL, val);
 547			netif_carrier_on(bp->dev);
 548			b44_link_report(bp);
 549		}
 550		return;
 551	}
 552
 553	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
 554	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
 555	    (bmsr != 0xffff)) {
 556		if (aux & MII_AUXCTRL_SPEED)
 557			bp->flags |= B44_FLAG_100_BASE_T;
 558		else
 559			bp->flags &= ~B44_FLAG_100_BASE_T;
 560		if (aux & MII_AUXCTRL_DUPLEX)
 561			bp->flags |= B44_FLAG_FULL_DUPLEX;
 562		else
 563			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
 564
 565		if (!netif_carrier_ok(bp->dev) &&
 566		    (bmsr & BMSR_LSTATUS)) {
 567			u32 val = br32(bp, B44_TX_CTRL);
 568			u32 local_adv, remote_adv;
 569
 570			if (bp->flags & B44_FLAG_FULL_DUPLEX)
 571				val |= TX_CTRL_DUPLEX;
 572			else
 573				val &= ~TX_CTRL_DUPLEX;
 574			bw32(bp, B44_TX_CTRL, val);
 575
 576			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
 577			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
 578			    !b44_readphy(bp, MII_LPA, &remote_adv))
 579				b44_set_flow_ctrl(bp, local_adv, remote_adv);
 580
 581			/* Link now up */
 582			netif_carrier_on(bp->dev);
 583			b44_link_report(bp);
 584		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
 585			/* Link now down */
 586			netif_carrier_off(bp->dev);
 587			b44_link_report(bp);
 588		}
 589
 590		if (bmsr & BMSR_RFAULT)
 591			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
 592		if (bmsr & BMSR_JCD)
 593			netdev_warn(bp->dev, "Jabber detected in PHY\n");
 594	}
 595}
 596
 597static void b44_timer(struct timer_list *t)
 598{
 599	struct b44 *bp = from_timer(bp, t, timer);
 600
 601	spin_lock_irq(&bp->lock);
 602
 603	b44_check_phy(bp);
 604
 605	b44_stats_update(bp);
 606
 607	spin_unlock_irq(&bp->lock);
 608
 609	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
 610}
 611
 612static void b44_tx(struct b44 *bp)
 613{
 614	u32 cur, cons;
 615	unsigned bytes_compl = 0, pkts_compl = 0;
 616
 617	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
 618	cur /= sizeof(struct dma_desc);
 619
 620	/* XXX needs updating when NETIF_F_SG is supported */
 621	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
 622		struct ring_info *rp = &bp->tx_buffers[cons];
 623		struct sk_buff *skb = rp->skb;
 624
 625		BUG_ON(skb == NULL);
 626
 627		dma_unmap_single(bp->sdev->dma_dev,
 628				 rp->mapping,
 629				 skb->len,
 630				 DMA_TO_DEVICE);
 631		rp->skb = NULL;
 632
 633		bytes_compl += skb->len;
 634		pkts_compl++;
 635
 636		dev_consume_skb_irq(skb);
 637	}
 638
 639	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
 640	bp->tx_cons = cons;
 641	if (netif_queue_stopped(bp->dev) &&
 642	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
 643		netif_wake_queue(bp->dev);
 644
 645	bw32(bp, B44_GPTIMER, 0);
 646}
 647
 648/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 649 * before the DMA address you give it.  So we allocate 30 more bytes
 650 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 651 * point the chip at 30 bytes past where the rx_header will go.
 652 */
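/* Editor's sketch of the resulting buffer layout (derived from
 * RX_PKT_OFFSET = RX_HEADER_LEN + 2):
 *
 *   skb->data                          skb->data + RX_PKT_OFFSET
 *   | rx_header + 2 bytes of padding | received Ethernet frame ... |
 *
 * b44_rx() later does skb_put(skb, len + RX_PKT_OFFSET) followed by
 * skb_pull(skb, RX_PKT_OFFSET) so only the frame is passed up the stack.
 */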
 653static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 654{
 655	struct dma_desc *dp;
 656	struct ring_info *src_map, *map;
 657	struct rx_header *rh;
 658	struct sk_buff *skb;
 659	dma_addr_t mapping;
 660	int dest_idx;
 661	u32 ctrl;
 662
 663	src_map = NULL;
 664	if (src_idx >= 0)
 665		src_map = &bp->rx_buffers[src_idx];
 666	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 667	map = &bp->rx_buffers[dest_idx];
 668	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
 669	if (skb == NULL)
 670		return -ENOMEM;
 671
 672	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 673				 RX_PKT_BUF_SZ,
 674				 DMA_FROM_DEVICE);
 675
 676	/* Hardware bug work-around, the chip is unable to do PCI DMA
 677	   to/from anything above 1GB :-( */
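	/* Editor's note: when the mapping fails or lands above the 30-bit
	 * window, the buffer is reallocated with GFP_ATOMIC | GFP_DMA so it
	 * comes from the low DMA zone, and force_copybreak is set so b44_rx()
	 * keeps copying packets out instead of handing ring buffers upstream.
	 */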
 678	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 679		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 680		/* Sigh... */
 681		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 682			dma_unmap_single(bp->sdev->dma_dev, mapping,
 683					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 684		dev_kfree_skb_any(skb);
 685		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
 686		if (skb == NULL)
 687			return -ENOMEM;
 688		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
 689					 RX_PKT_BUF_SZ,
 690					 DMA_FROM_DEVICE);
 691		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 692		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 693			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 694				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 695			dev_kfree_skb_any(skb);
 696			return -ENOMEM;
 697		}
 698		bp->force_copybreak = 1;
 699	}
 700
 701	rh = (struct rx_header *) skb->data;
 702
 703	rh->len = 0;
 704	rh->flags = 0;
 705
 706	map->skb = skb;
 707	map->mapping = mapping;
 708
 709	if (src_map != NULL)
 710		src_map->skb = NULL;
 711
 712	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
 713	if (dest_idx == (B44_RX_RING_SIZE - 1))
 714		ctrl |= DESC_CTRL_EOT;
 715
 716	dp = &bp->rx_ring[dest_idx];
 717	dp->ctrl = cpu_to_le32(ctrl);
 718	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 719
 720	if (bp->flags & B44_FLAG_RX_RING_HACK)
 721		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
 722			                    dest_idx * sizeof(*dp),
 723			                    DMA_BIDIRECTIONAL);
 724
 725	return RX_PKT_BUF_SZ;
 726}
 727
 728static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 729{
 730	struct dma_desc *src_desc, *dest_desc;
 731	struct ring_info *src_map, *dest_map;
 732	struct rx_header *rh;
 733	int dest_idx;
 734	__le32 ctrl;
 735
 736	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
 737	dest_desc = &bp->rx_ring[dest_idx];
 738	dest_map = &bp->rx_buffers[dest_idx];
 739	src_desc = &bp->rx_ring[src_idx];
 740	src_map = &bp->rx_buffers[src_idx];
 741
 742	dest_map->skb = src_map->skb;
 743	rh = (struct rx_header *) src_map->skb->data;
 744	rh->len = 0;
 745	rh->flags = 0;
 746	dest_map->mapping = src_map->mapping;
 747
 748	if (bp->flags & B44_FLAG_RX_RING_HACK)
 749		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
 750			                 src_idx * sizeof(*src_desc),
 751			                 DMA_BIDIRECTIONAL);
 752
 753	ctrl = src_desc->ctrl;
 754	if (dest_idx == (B44_RX_RING_SIZE - 1))
 755		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
 756	else
 757		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
 758
 759	dest_desc->ctrl = ctrl;
 760	dest_desc->addr = src_desc->addr;
 761
 762	src_map->skb = NULL;
 763
 764	if (bp->flags & B44_FLAG_RX_RING_HACK)
 765		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
 766					     dest_idx * sizeof(*dest_desc),
 767					     DMA_BIDIRECTIONAL);
 768
 769	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
 770				   RX_PKT_BUF_SZ,
 771				   DMA_FROM_DEVICE);
 772}
 773
 774static int b44_rx(struct b44 *bp, int budget)
 775{
 776	int received;
 777	u32 cons, prod;
 778
 779	received = 0;
 780	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
 781	prod /= sizeof(struct dma_desc);
 782	cons = bp->rx_cons;
 783
 784	while (cons != prod && budget > 0) {
 785		struct ring_info *rp = &bp->rx_buffers[cons];
 786		struct sk_buff *skb = rp->skb;
 787		dma_addr_t map = rp->mapping;
 788		struct rx_header *rh;
 789		u16 len;
 790
 791		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
 792					RX_PKT_BUF_SZ,
 793					DMA_FROM_DEVICE);
 794		rh = (struct rx_header *) skb->data;
 795		len = le16_to_cpu(rh->len);
 796		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
 797		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
 798		drop_it:
 799			b44_recycle_rx(bp, cons, bp->rx_prod);
 800		drop_it_no_recycle:
 801			bp->dev->stats.rx_dropped++;
 802			goto next_pkt;
 803		}
 804
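		/* Editor's note: the chip can raise the RX interrupt before the
		 * length word of the rx_header is visible in memory, so the
		 * loop below polls rh->len for roughly 10us before dropping.
		 */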
 805		if (len == 0) {
 806			int i = 0;
 807
 808			do {
 809				udelay(2);
 810				barrier();
 811				len = le16_to_cpu(rh->len);
 812			} while (len == 0 && i++ < 5);
 813			if (len == 0)
 814				goto drop_it;
 815		}
 816
 817		/* Omit CRC. */
 818		len -= 4;
 819
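		/* Editor's note: large frames keep their DMA buffer and a fresh
		 * one is allocated for the ring slot; small frames (or any frame
		 * when force_copybreak is set) are copied into a new skb and the
		 * original buffer is recycled back into the ring.
		 */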
 820		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
 821			int skb_size;
 822			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 823			if (skb_size < 0)
 824				goto drop_it;
 825			dma_unmap_single(bp->sdev->dma_dev, map,
 826					 skb_size, DMA_FROM_DEVICE);
 827			/* Leave out rx_header */
 828			skb_put(skb, len + RX_PKT_OFFSET);
 829			skb_pull(skb, RX_PKT_OFFSET);
 830		} else {
 831			struct sk_buff *copy_skb;
 832
 833			b44_recycle_rx(bp, cons, bp->rx_prod);
 834			copy_skb = napi_alloc_skb(&bp->napi, len);
 835			if (copy_skb == NULL)
 836				goto drop_it_no_recycle;
 837
 838			skb_put(copy_skb, len);
 839			/* DMA sync done above, copy just the actual packet */
 840			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
 841							 copy_skb->data, len);
 842			skb = copy_skb;
 843		}
 844		skb_checksum_none_assert(skb);
 845		skb->protocol = eth_type_trans(skb, bp->dev);
 846		netif_receive_skb(skb);
 847		received++;
 848		budget--;
 849	next_pkt:
 850		bp->rx_prod = (bp->rx_prod + 1) &
 851			(B44_RX_RING_SIZE - 1);
 852		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
 853	}
 854
 855	bp->rx_cons = cons;
 856	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
 857
 858	return received;
 859}
 860
 861static int b44_poll(struct napi_struct *napi, int budget)
 862{
 863	struct b44 *bp = container_of(napi, struct b44, napi);
 864	int work_done;
 865	unsigned long flags;
 866
 867	spin_lock_irqsave(&bp->lock, flags);
 868
 869	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
 870		/* spin_lock(&bp->tx_lock); */
 871		b44_tx(bp);
 872		/* spin_unlock(&bp->tx_lock); */
 873	}
 874	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
 875		bp->istat &= ~ISTAT_RFO;
 876		b44_disable_ints(bp);
 877		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
 878		b44_init_rings(bp);
 879		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
 880		netif_wake_queue(bp->dev);
 881	}
 882
 883	spin_unlock_irqrestore(&bp->lock, flags);
 884
 885	work_done = 0;
 886	if (bp->istat & ISTAT_RX)
 887		work_done += b44_rx(bp, budget);
 888
 889	if (bp->istat & ISTAT_ERRORS) {
 890		spin_lock_irqsave(&bp->lock, flags);
 891		b44_halt(bp);
 892		b44_init_rings(bp);
 893		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
 894		netif_wake_queue(bp->dev);
 895		spin_unlock_irqrestore(&bp->lock, flags);
 896		work_done = 0;
 897	}
 898
 899	if (work_done < budget) {
 900		napi_complete_done(napi, work_done);
 901		b44_enable_ints(bp);
 902	}
 903
 904	return work_done;
 905}
 906
 907static irqreturn_t b44_interrupt(int irq, void *dev_id)
 908{
 909	struct net_device *dev = dev_id;
 910	struct b44 *bp = netdev_priv(dev);
 911	u32 istat, imask;
 912	int handled = 0;
 913
 914	spin_lock(&bp->lock);
 915
 916	istat = br32(bp, B44_ISTAT);
 917	imask = br32(bp, B44_IMASK);
 918
 919	/* The interrupt mask register controls which interrupt bits
 920	 * will actually raise an interrupt to the CPU when set by hw/firmware,
 921	 * but doesn't mask off the bits.
 922	 */
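	/* Editor's note: that is why the handler masks istat with imask itself
	 * and acknowledges every bit it saw by writing it back to B44_ISTAT at
	 * the irq_ack label below.
	 */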
 923	istat &= imask;
 924	if (istat) {
 925		handled = 1;
 926
 927		if (unlikely(!netif_running(dev))) {
 928			netdev_info(dev, "late interrupt\n");
 929			goto irq_ack;
 930		}
 931
 932		if (napi_schedule_prep(&bp->napi)) {
 933			/* NOTE: These writes are posted by the readback of
 934			 *       the ISTAT register below.
 935			 */
 936			bp->istat = istat;
 937			__b44_disable_ints(bp);
 938			__napi_schedule(&bp->napi);
 939		}
 940
 941irq_ack:
 942		bw32(bp, B44_ISTAT, istat);
 943		br32(bp, B44_ISTAT);
 944	}
 945	spin_unlock(&bp->lock);
 946	return IRQ_RETVAL(handled);
 947}
 948
 949static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
 950{
 951	struct b44 *bp = netdev_priv(dev);
 952
 953	netdev_err(dev, "transmit timed out, resetting\n");
 954
 955	spin_lock_irq(&bp->lock);
 956
 957	b44_halt(bp);
 958	b44_init_rings(bp);
 959	b44_init_hw(bp, B44_FULL_RESET);
 960
 961	spin_unlock_irq(&bp->lock);
 962
 963	b44_enable_ints(bp);
 964
 965	netif_wake_queue(dev);
 966}
 967
 968static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 969{
 970	struct b44 *bp = netdev_priv(dev);
 971	int rc = NETDEV_TX_OK;
 972	dma_addr_t mapping;
 973	u32 len, entry, ctrl;
 974	unsigned long flags;
 975
 976	len = skb->len;
 977	spin_lock_irqsave(&bp->lock, flags);
 978
 979	/* This is a hard error, log it. */
 980	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
 981		netif_stop_queue(dev);
 982		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 983		goto err_out;
 984	}
 985
 986	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
 987	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 988		struct sk_buff *bounce_skb;
 989
 990		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
 991		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
 992			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 993					     DMA_TO_DEVICE);
 994
 995		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
 996		if (!bounce_skb)
 997			goto err_out;
 998
 999		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
1000					 len, DMA_TO_DEVICE);
1001		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
1002			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
1003				dma_unmap_single(bp->sdev->dma_dev, mapping,
1004						     len, DMA_TO_DEVICE);
1005			dev_kfree_skb_any(bounce_skb);
1006			goto err_out;
1007		}
1008
1009		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1010		dev_consume_skb_any(skb);
1011		skb = bounce_skb;
1012	}
1013
1014	entry = bp->tx_prod;
1015	bp->tx_buffers[entry].skb = skb;
1016	bp->tx_buffers[entry].mapping = mapping;
1017
1018	ctrl  = (len & DESC_CTRL_LEN);
1019	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1020	if (entry == (B44_TX_RING_SIZE - 1))
1021		ctrl |= DESC_CTRL_EOT;
1022
1023	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1024	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1025
1026	if (bp->flags & B44_FLAG_TX_RING_HACK)
1027		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1028			                    entry * sizeof(bp->tx_ring[0]),
1029			                    DMA_TO_DEVICE);
1030
1031	entry = NEXT_TX(entry);
1032
1033	bp->tx_prod = entry;
1034
1035	wmb();
1036
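	/* Editor's note: the wmb() above orders the descriptor and ring writes
	 * against the mailbox write below, so the DMA engine does not fetch a
	 * stale descriptor when it sees the new tail pointer.
	 */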
1037	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1038	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1039		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1040	if (bp->flags & B44_FLAG_REORDER_BUG)
1041		br32(bp, B44_DMATX_PTR);
1042
1043	netdev_sent_queue(dev, skb->len);
1044
1045	if (TX_BUFFS_AVAIL(bp) < 1)
1046		netif_stop_queue(dev);
1047
1048out_unlock:
1049	spin_unlock_irqrestore(&bp->lock, flags);
1050
1051	return rc;
1052
1053err_out:
1054	rc = NETDEV_TX_BUSY;
1055	goto out_unlock;
1056}
1057
1058static int b44_change_mtu(struct net_device *dev, int new_mtu)
1059{
1060	struct b44 *bp = netdev_priv(dev);
1061
1062	if (!netif_running(dev)) {
1063		/* We'll just catch it later when the
1064		 * device is up'd.
1065		 */
1066		dev->mtu = new_mtu;
1067		return 0;
1068	}
1069
1070	spin_lock_irq(&bp->lock);
1071	b44_halt(bp);
1072	dev->mtu = new_mtu;
1073	b44_init_rings(bp);
1074	b44_init_hw(bp, B44_FULL_RESET);
1075	spin_unlock_irq(&bp->lock);
1076
1077	b44_enable_ints(bp);
1078
1079	return 0;
1080}
1081
1082/* Free up pending packets in all rx/tx rings.
1083 *
1084 * The chip has been shut down and the driver detached from
1085 * the networking, so no interrupts or new tx packets will
1086 * end up in the driver.  bp->lock is not held and we are not
1087 * in an interrupt context and thus may sleep.
1088 */
1089static void b44_free_rings(struct b44 *bp)
1090{
1091	struct ring_info *rp;
1092	int i;
1093
1094	for (i = 0; i < B44_RX_RING_SIZE; i++) {
1095		rp = &bp->rx_buffers[i];
1096
1097		if (rp->skb == NULL)
1098			continue;
1099		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1100				 DMA_FROM_DEVICE);
1101		dev_kfree_skb_any(rp->skb);
1102		rp->skb = NULL;
1103	}
1104
1105	/* XXX needs changes once NETIF_F_SG is set... */
1106	for (i = 0; i < B44_TX_RING_SIZE; i++) {
1107		rp = &bp->tx_buffers[i];
1108
1109		if (rp->skb == NULL)
1110			continue;
1111		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1112				 DMA_TO_DEVICE);
1113		dev_kfree_skb_any(rp->skb);
1114		rp->skb = NULL;
1115	}
1116}
1117
1118/* Initialize tx/rx rings for packet processing.
1119 *
1120 * The chip has been shut down and the driver detached from
1121 * the networking, so no interrupts or new tx packets will
1122 * end up in the driver.
1123 */
1124static void b44_init_rings(struct b44 *bp)
1125{
1126	int i;
1127
1128	b44_free_rings(bp);
1129
1130	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1131	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1132
1133	if (bp->flags & B44_FLAG_RX_RING_HACK)
1134		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1135					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1136
1137	if (bp->flags & B44_FLAG_TX_RING_HACK)
1138		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1139					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
1140
1141	for (i = 0; i < bp->rx_pending; i++) {
1142		if (b44_alloc_rx_skb(bp, -1, i) < 0)
1143			break;
1144	}
1145}
1146
1147/*
1148 * Must not be invoked with interrupt sources disabled and
1149 * the hardware shut down.
1150 */
1151static void b44_free_consistent(struct b44 *bp)
1152{
1153	kfree(bp->rx_buffers);
1154	bp->rx_buffers = NULL;
1155	kfree(bp->tx_buffers);
1156	bp->tx_buffers = NULL;
1157	if (bp->rx_ring) {
1158		if (bp->flags & B44_FLAG_RX_RING_HACK) {
1159			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1160					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1161			kfree(bp->rx_ring);
1162		} else
1163			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1164					  bp->rx_ring, bp->rx_ring_dma);
1165		bp->rx_ring = NULL;
1166		bp->flags &= ~B44_FLAG_RX_RING_HACK;
1167	}
1168	if (bp->tx_ring) {
1169		if (bp->flags & B44_FLAG_TX_RING_HACK) {
1170			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1171					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1172			kfree(bp->tx_ring);
1173		} else
1174			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1175					  bp->tx_ring, bp->tx_ring_dma);
1176		bp->tx_ring = NULL;
1177		bp->flags &= ~B44_FLAG_TX_RING_HACK;
1178	}
1179}
1180
1181/*
1182 * Must not be invoked with interrupt sources disabled and
1183 * the hardware shut down.  Can sleep.
1184 */
1185static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1186{
1187	int size;
1188
1189	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1190	bp->rx_buffers = kzalloc(size, gfp);
1191	if (!bp->rx_buffers)
1192		goto out_err;
1193
1194	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1195	bp->tx_buffers = kzalloc(size, gfp);
1196	if (!bp->tx_buffers)
1197		goto out_err;
1198
1199	size = DMA_TABLE_BYTES;
1200	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1201					 &bp->rx_ring_dma, gfp);
1202	if (!bp->rx_ring) {
1203		/* Allocation may have failed due to pci_alloc_consistent
1204		   insisting on use of GFP_DMA, which is more restrictive
1205		   than necessary...  */
1206		struct dma_desc *rx_ring;
1207		dma_addr_t rx_ring_dma;
1208
1209		rx_ring = kzalloc(size, gfp);
1210		if (!rx_ring)
1211			goto out_err;
1212
1213		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1214					     DMA_TABLE_BYTES,
1215					     DMA_BIDIRECTIONAL);
1216
1217		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1218			rx_ring_dma + size > DMA_BIT_MASK(30)) {
1219			kfree(rx_ring);
1220			goto out_err;
1221		}
1222
1223		bp->rx_ring = rx_ring;
1224		bp->rx_ring_dma = rx_ring_dma;
1225		bp->flags |= B44_FLAG_RX_RING_HACK;
1226	}
1227
1228	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1229					 &bp->tx_ring_dma, gfp);
1230	if (!bp->tx_ring) {
1231		/* Allocation may have failed due to ssb_dma_alloc_consistent
1232		   insisting on use of GFP_DMA, which is more restrictive
1233		   than necessary...  */
1234		struct dma_desc *tx_ring;
1235		dma_addr_t tx_ring_dma;
1236
1237		tx_ring = kzalloc(size, gfp);
1238		if (!tx_ring)
1239			goto out_err;
1240
1241		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1242					     DMA_TABLE_BYTES,
1243					     DMA_TO_DEVICE);
1244
1245		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1246			tx_ring_dma + size > DMA_BIT_MASK(30)) {
1247			kfree(tx_ring);
1248			goto out_err;
1249		}
1250
1251		bp->tx_ring = tx_ring;
1252		bp->tx_ring_dma = tx_ring_dma;
1253		bp->flags |= B44_FLAG_TX_RING_HACK;
1254	}
1255
1256	return 0;
1257
1258out_err:
1259	b44_free_consistent(bp);
1260	return -ENOMEM;
1261}
1262
1263/* bp->lock is held. */
1264static void b44_clear_stats(struct b44 *bp)
1265{
1266	unsigned long reg;
1267
1268	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1269	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1270		br32(bp, reg);
1271	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1272		br32(bp, reg);
1273}
1274
1275/* bp->lock is held. */
1276static void b44_chip_reset(struct b44 *bp, int reset_kind)
1277{
1278	struct ssb_device *sdev = bp->sdev;
1279	bool was_enabled;
1280
1281	was_enabled = ssb_device_is_enabled(bp->sdev);
1282
1283	ssb_device_enable(bp->sdev, 0);
1284	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1285
1286	if (was_enabled) {
1287		bw32(bp, B44_RCV_LAZY, 0);
1288		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1289		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1290		bw32(bp, B44_DMATX_CTRL, 0);
1291		bp->tx_prod = bp->tx_cons = 0;
1292		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1293			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1294				     100, 0);
1295		}
1296		bw32(bp, B44_DMARX_CTRL, 0);
1297		bp->rx_prod = bp->rx_cons = 0;
1298	}
1299
1300	b44_clear_stats(bp);
1301
1302	/*
1303	 * Don't enable PHY if we are doing a partial reset
1304	 * we are probably going to power down
1305	 */
1306	if (reset_kind == B44_CHIP_RESET_PARTIAL)
1307		return;
1308
1309	switch (sdev->bus->bustype) {
1310	case SSB_BUSTYPE_SSB:
1311		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1312		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1313					B44_MDC_RATIO)
1314		     & MDIO_CTRL_MAXF_MASK)));
1315		break;
1316	case SSB_BUSTYPE_PCI:
1317		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1318		     (0x0d & MDIO_CTRL_MAXF_MASK)));
1319		break;
1320	case SSB_BUSTYPE_PCMCIA:
1321	case SSB_BUSTYPE_SDIO:
1322		WARN_ON(1); /* A device with this bus does not exist. */
1323		break;
1324	}
1325
1326	br32(bp, B44_MDIO_CTRL);
1327
1328	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1329		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1330		br32(bp, B44_ENET_CTRL);
1331		bp->flags |= B44_FLAG_EXTERNAL_PHY;
1332	} else {
1333		u32 val = br32(bp, B44_DEVCTRL);
1334
1335		if (val & DEVCTRL_EPR) {
1336			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1337			br32(bp, B44_DEVCTRL);
1338			udelay(100);
1339		}
1340		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
1341	}
1342}
1343
1344/* bp->lock is held. */
1345static void b44_halt(struct b44 *bp)
1346{
1347	b44_disable_ints(bp);
1348	/* reset PHY */
1349	b44_phy_reset(bp);
1350	/* power down PHY */
1351	netdev_info(bp->dev, "powering down PHY\n");
1352	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1353	/* now reset the chip, but without enabling the MAC & PHY
1354	 * part of it. This has to be done _after_ we shut down the PHY. */
1355	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1356		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1357	else
1358		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1359}
1360
1361/* bp->lock is held. */
1362static void __b44_set_mac_addr(struct b44 *bp)
1363{
1364	bw32(bp, B44_CAM_CTRL, 0);
1365	if (!(bp->dev->flags & IFF_PROMISC)) {
1366		u32 val;
1367
1368		__b44_cam_write(bp, bp->dev->dev_addr, 0);
1369		val = br32(bp, B44_CAM_CTRL);
1370		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1371	}
1372}
1373
1374static int b44_set_mac_addr(struct net_device *dev, void *p)
1375{
1376	struct b44 *bp = netdev_priv(dev);
1377	struct sockaddr *addr = p;
1378	u32 val;
1379
1380	if (netif_running(dev))
1381		return -EBUSY;
1382
1383	if (!is_valid_ether_addr(addr->sa_data))
1384		return -EINVAL;
1385
1386	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1387
1388	spin_lock_irq(&bp->lock);
1389
1390	val = br32(bp, B44_RXCONFIG);
1391	if (!(val & RXCONFIG_CAM_ABSENT))
1392		__b44_set_mac_addr(bp);
1393
1394	spin_unlock_irq(&bp->lock);
1395
1396	return 0;
1397}
1398
1399/* Called at device open time to get the chip ready for
1400 * packet processing.  Invoked with bp->lock held.
1401 */
1402static void __b44_set_rx_mode(struct net_device *);
1403static void b44_init_hw(struct b44 *bp, int reset_kind)
1404{
1405	u32 val;
1406
1407	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1408	if (reset_kind == B44_FULL_RESET) {
1409		b44_phy_reset(bp);
1410		b44_setup_phy(bp);
1411	}
1412
1413	/* Enable CRC32, set proper LED modes and power on PHY */
1414	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1415	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1416
1417	/* This sets the MAC address too.  */
1418	__b44_set_rx_mode(bp->dev);
1419
1420	/* MTU + eth header + possible VLAN tag + struct rx_header */
1421	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1422	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
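	/* For example, with the default 1500-byte MTU this programs
	 * 1500 + 14 (ETH_HLEN) + 8 + RX_HEADER_LEN; the extra 8 bytes
	 * presumably cover a VLAN tag plus the trailing FCS.
	 */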
1423
1424	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1425	if (reset_kind == B44_PARTIAL_RESET) {
1426		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1427				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1428	} else {
1429		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1430		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1431		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1432				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1433		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1434
1435		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1436		bp->rx_prod = bp->rx_pending;
1437
1438		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1439	}
1440
1441	val = br32(bp, B44_ENET_CTRL);
1442	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1443
1444	netdev_reset_queue(bp->dev);
1445}
1446
1447static int b44_open(struct net_device *dev)
1448{
1449	struct b44 *bp = netdev_priv(dev);
1450	int err;
1451
1452	err = b44_alloc_consistent(bp, GFP_KERNEL);
1453	if (err)
1454		goto out;
1455
1456	napi_enable(&bp->napi);
1457
1458	b44_init_rings(bp);
1459	b44_init_hw(bp, B44_FULL_RESET);
1460
1461	b44_check_phy(bp);
1462
1463	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1464	if (unlikely(err < 0)) {
1465		napi_disable(&bp->napi);
1466		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1467		b44_free_rings(bp);
1468		b44_free_consistent(bp);
1469		goto out;
1470	}
1471
1472	timer_setup(&bp->timer, b44_timer, 0);
1473	bp->timer.expires = jiffies + HZ;
1474	add_timer(&bp->timer);
1475
1476	b44_enable_ints(bp);
1477
1478	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1479		phy_start(dev->phydev);
1480
1481	netif_start_queue(dev);
1482out:
1483	return err;
1484}
1485
1486#ifdef CONFIG_NET_POLL_CONTROLLER
1487/*
1488 * Polling receive - used by netconsole and other diagnostic tools
1489 * to allow network i/o with interrupts disabled.
1490 */
1491static void b44_poll_controller(struct net_device *dev)
1492{
1493	disable_irq(dev->irq);
1494	b44_interrupt(dev->irq, dev);
1495	enable_irq(dev->irq);
1496}
1497#endif
1498
1499static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1500{
1501	u32 i;
1502	u32 *pattern = (u32 *) pp;
1503
1504	for (i = 0; i < bytes; i += sizeof(u32)) {
1505		bw32(bp, B44_FILT_ADDR, table_offset + i);
1506		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1507	}
1508}
1509
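/* Build a Wake-on-LAN "magic packet" match at @offset in @ppattern: six
 * bytes of 0xff (the sync stream) followed by the MAC address repeated
 * (up to B44_MAX_PATTERNS times) until the B44_PATTERN_SIZE buffer runs
 * out, setting the matching bits in the @pmask byte mask as it goes.
 * Returns the pattern length minus one, as programmed into B44_WKUP_LEN.
 */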
1510static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1511{
1512	int magicsync = 6;
1513	int k, j, len = offset;
1514	int ethaddr_bytes = ETH_ALEN;
1515
1516	memset(ppattern + offset, 0xff, magicsync);
1517	for (j = 0; j < magicsync; j++) {
1518		pmask[len >> 3] |= BIT(len & 7);
1519		len++;
1520	}
1521
1522	for (j = 0; j < B44_MAX_PATTERNS; j++) {
1523		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1524			ethaddr_bytes = ETH_ALEN;
1525		else
1526			ethaddr_bytes = B44_PATTERN_SIZE - len;
1527		if (ethaddr_bytes <= 0)
1528			break;
1529		for (k = 0; k < ethaddr_bytes; k++) {
1530			ppattern[offset + magicsync +
1531				(j * ETH_ALEN) + k] = macaddr[k];
1532			pmask[len >> 3] |= BIT(len & 7);
1533			len++;
1534		}
1535	}
1536	return len - 1;
1537}
1538
1539/* Setup magic packet patterns in the b44 WOL
1540 * pattern matching filter.
1541 */
1542static void b44_setup_pseudo_magicp(struct b44 *bp)
1543{
1544
1545	u32 val;
1546	int plen0, plen1, plen2;
1547	u8 *pwol_pattern;
1548	u8 pwol_mask[B44_PMASK_SIZE];
1549
1550	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1551	if (!pwol_pattern)
1552		return;
1553
1554	/* IPv4 magic packet pattern - pattern 0. */
1555	memset(pwol_mask, 0, B44_PMASK_SIZE);
1556	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1557				  B44_ETHIPV4UDP_HLEN);
1558
1559	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1560	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1561
1562	/* Raw ethernet II magic packet pattern - pattern 1 */
1563	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1564	memset(pwol_mask, 0, B44_PMASK_SIZE);
1565	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1566				  ETH_HLEN);
1567
1568	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1569		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1570	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1571		       B44_PMASK_BASE + B44_PMASK_SIZE);
1572
1573	/* IPv6 magic packet pattern - pattern 2 */
1574	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1575	memset(pwol_mask, 0, B44_PMASK_SIZE);
1576	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1577				  B44_ETHIPV6UDP_HLEN);
1578
1579	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1580		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1581	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1582		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1583
1584	kfree(pwol_pattern);
1585
1586	/* set these patterns' lengths: one less than each real length */
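	/* (B44_WKUP_LEN holds one length byte per pattern: bits 7:0 for
	 *  pattern 0, bits 15:8 for pattern 1 and bits 23:16 for pattern 2.)
	 */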
1587	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1588	bw32(bp, B44_WKUP_LEN, val);
1589
1590	/* enable wakeup pattern matching */
1591	val = br32(bp, B44_DEVCTRL);
1592	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1593
1594}
1595
1596#ifdef CONFIG_B44_PCI
1597static void b44_setup_wol_pci(struct b44 *bp)
1598{
1599	u16 val;
1600
1601	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1602		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1603		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1604		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1605	}
1606}
1607#else
1608static inline void b44_setup_wol_pci(struct b44 *bp) { }
1609#endif /* CONFIG_B44_PCI */
1610
1611static void b44_setup_wol(struct b44 *bp)
1612{
1613	u32 val;
1614
1615	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1616
1617	if (bp->flags & B44_FLAG_B0_ANDLATER) {
1618
1619		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1620
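		/* The station address is split across two registers; for
		 * example 00:11:22:33:44:55 yields ADDR_LO = 0x22334455 and
		 * ADDR_HI = 0x0011.
		 */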
1621		val = bp->dev->dev_addr[2] << 24 |
1622			bp->dev->dev_addr[3] << 16 |
1623			bp->dev->dev_addr[4] << 8 |
1624			bp->dev->dev_addr[5];
1625		bw32(bp, B44_ADDR_LO, val);
1626
1627		val = bp->dev->dev_addr[0] << 8 |
1628			bp->dev->dev_addr[1];
1629		bw32(bp, B44_ADDR_HI, val);
1630
1631		val = br32(bp, B44_DEVCTRL);
1632		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1633
1634	} else {
1635		b44_setup_pseudo_magicp(bp);
1636	}
1637	b44_setup_wol_pci(bp);
1638}
1639
1640static int b44_close(struct net_device *dev)
1641{
1642	struct b44 *bp = netdev_priv(dev);
1643
1644	netif_stop_queue(dev);
1645
1646	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
1647		phy_stop(dev->phydev);
1648
1649	napi_disable(&bp->napi);
1650
1651	del_timer_sync(&bp->timer);
1652
1653	spin_lock_irq(&bp->lock);
1654
1655	b44_halt(bp);
1656	b44_free_rings(bp);
1657	netif_carrier_off(dev);
1658
1659	spin_unlock_irq(&bp->lock);
1660
1661	free_irq(dev->irq, dev);
1662
1663	if (bp->flags & B44_FLAG_WOL_ENABLE) {
1664		b44_init_hw(bp, B44_PARTIAL_RESET);
1665		b44_setup_wol(bp);
1666	}
1667
1668	b44_free_consistent(bp);
1669
1670	return 0;
1671}
1672
1673static void b44_get_stats64(struct net_device *dev,
1674			    struct rtnl_link_stats64 *nstat)
1675{
1676	struct b44 *bp = netdev_priv(dev);
1677	struct b44_hw_stats *hwstat = &bp->hw_stats;
1678	unsigned int start;
1679
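	/* Snapshot the 64-bit counters under the u64_stats seqcount so a
	 * concurrent b44_stats_update() cannot hand us a torn read; retry
	 * if the sequence changed while we were copying.
	 */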
1680	do {
1681		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
1682
1683		/* Convert HW stats into rtnl_link_stats64 stats. */
1684		nstat->rx_packets = hwstat->rx_pkts;
1685		nstat->tx_packets = hwstat->tx_pkts;
1686		nstat->rx_bytes   = hwstat->rx_octets;
1687		nstat->tx_bytes   = hwstat->tx_octets;
1688		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1689				     hwstat->tx_oversize_pkts +
1690				     hwstat->tx_underruns +
1691				     hwstat->tx_excessive_cols +
1692				     hwstat->tx_late_cols);
1693		nstat->multicast  = hwstat->rx_multicast_pkts;
1694		nstat->collisions = hwstat->tx_total_cols;
1695
1696		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1697					   hwstat->rx_undersize);
1698		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1699		nstat->rx_frame_errors  = hwstat->rx_align_errs;
1700		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1701		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1702					   hwstat->rx_oversize_pkts +
1703					   hwstat->rx_missed_pkts +
1704					   hwstat->rx_crc_align_errs +
1705					   hwstat->rx_undersize +
1706					   hwstat->rx_crc_errs +
1707					   hwstat->rx_align_errs +
1708					   hwstat->rx_symbol_errs);
1709
1710		nstat->tx_aborted_errors = hwstat->tx_underruns;
1711#if 0
1712		/* Carrier lost counter seems to be broken for some devices */
1713		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1714#endif
1715	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
1716
1717}
1718
1719static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1720{
1721	struct netdev_hw_addr *ha;
1722	int i, num_ents;
1723
1724	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1725	i = 0;
1726	netdev_for_each_mc_addr(ha, dev) {
1727		if (i == num_ents)
1728			break;
1729		__b44_cam_write(bp, ha->addr, i++ + 1);
1730	}
1731	return i + 1;
1732}
1733
1734static void __b44_set_rx_mode(struct net_device *dev)
1735{
1736	struct b44 *bp = netdev_priv(dev);
1737	u32 val;
1738
1739	val = br32(bp, B44_RXCONFIG);
1740	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1741	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1742		val |= RXCONFIG_PROMISC;
1743		bw32(bp, B44_RXCONFIG, val);
1744	} else {
1745		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1746		int i = 1;
1747
1748		__b44_set_mac_addr(bp);
1749
1750		if ((dev->flags & IFF_ALLMULTI) ||
1751		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1752			val |= RXCONFIG_ALLMULTI;
1753		else
1754			i = __b44_load_mcast(bp, dev);
1755
1756		for (; i < 64; i++)
1757			__b44_cam_write(bp, zero, i);
1758
1759		bw32(bp, B44_RXCONFIG, val);
1760		val = br32(bp, B44_CAM_CTRL);
1761		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1762	}
1763}
1764
1765static void b44_set_rx_mode(struct net_device *dev)
1766{
1767	struct b44 *bp = netdev_priv(dev);
1768
1769	spin_lock_irq(&bp->lock);
1770	__b44_set_rx_mode(dev);
1771	spin_unlock_irq(&bp->lock);
1772}
1773
1774static u32 b44_get_msglevel(struct net_device *dev)
1775{
1776	struct b44 *bp = netdev_priv(dev);
1777	return bp->msg_enable;
1778}
1779
1780static void b44_set_msglevel(struct net_device *dev, u32 value)
1781{
1782	struct b44 *bp = netdev_priv(dev);
1783	bp->msg_enable = value;
1784}
1785
1786static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1787{
1788	struct b44 *bp = netdev_priv(dev);
1789	struct ssb_bus *bus = bp->sdev->bus;
1790
1791	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1792	switch (bus->bustype) {
1793	case SSB_BUSTYPE_PCI:
1794		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1795		break;
1796	case SSB_BUSTYPE_SSB:
1797		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1798		break;
1799	case SSB_BUSTYPE_PCMCIA:
1800	case SSB_BUSTYPE_SDIO:
1801		WARN_ON(1); /* A device with this bus does not exist. */
1802		break;
1803	}
1804}
1805
1806static int b44_nway_reset(struct net_device *dev)
1807{
1808	struct b44 *bp = netdev_priv(dev);
1809	u32 bmcr;
1810	int r;
1811
1812	spin_lock_irq(&bp->lock);
1813	b44_readphy(bp, MII_BMCR, &bmcr);
1814	b44_readphy(bp, MII_BMCR, &bmcr);
1815	r = -EINVAL;
1816	if (bmcr & BMCR_ANENABLE) {
1817		b44_writephy(bp, MII_BMCR,
1818			     bmcr | BMCR_ANRESTART);
1819		r = 0;
1820	}
1821	spin_unlock_irq(&bp->lock);
1822
1823	return r;
1824}
1825
1826static int b44_get_link_ksettings(struct net_device *dev,
1827				  struct ethtool_link_ksettings *cmd)
1828{
1829	struct b44 *bp = netdev_priv(dev);
1830	u32 supported, advertising;
1831
1832	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1833		BUG_ON(!dev->phydev);
1834		phy_ethtool_ksettings_get(dev->phydev, cmd);
1835
1836		return 0;
1837	}
1838
1839	supported = (SUPPORTED_Autoneg);
1840	supported |= (SUPPORTED_100baseT_Half |
1841		      SUPPORTED_100baseT_Full |
1842		      SUPPORTED_10baseT_Half |
1843		      SUPPORTED_10baseT_Full |
1844		      SUPPORTED_MII);
1845
1846	advertising = 0;
1847	if (bp->flags & B44_FLAG_ADV_10HALF)
1848		advertising |= ADVERTISED_10baseT_Half;
1849	if (bp->flags & B44_FLAG_ADV_10FULL)
1850		advertising |= ADVERTISED_10baseT_Full;
1851	if (bp->flags & B44_FLAG_ADV_100HALF)
1852		advertising |= ADVERTISED_100baseT_Half;
1853	if (bp->flags & B44_FLAG_ADV_100FULL)
1854		advertising |= ADVERTISED_100baseT_Full;
1855	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1856	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1857		SPEED_100 : SPEED_10;
1858	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1859		DUPLEX_FULL : DUPLEX_HALF;
1860	cmd->base.port = 0;
1861	cmd->base.phy_address = bp->phy_addr;
1862	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1863		AUTONEG_DISABLE : AUTONEG_ENABLE;
1864	if (cmd->base.autoneg == AUTONEG_ENABLE)
1865		advertising |= ADVERTISED_Autoneg;
1866
1867	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1868						supported);
1869	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1870						advertising);
1871
1872	if (!netif_running(dev)) {
1873		cmd->base.speed = 0;
1874		cmd->base.duplex = 0xff;
1875	}
1876
1877	return 0;
1878}
1879
1880static int b44_set_link_ksettings(struct net_device *dev,
1881				  const struct ethtool_link_ksettings *cmd)
1882{
1883	struct b44 *bp = netdev_priv(dev);
1884	u32 speed;
1885	int ret;
1886	u32 advertising;
1887
1888	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
1889		BUG_ON(!dev->phydev);
1890		spin_lock_irq(&bp->lock);
1891		if (netif_running(dev))
1892			b44_setup_phy(bp);
1893
1894		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
1895
1896		spin_unlock_irq(&bp->lock);
1897
1898		return ret;
1899	}
1900
1901	speed = cmd->base.speed;
1902
1903	ethtool_convert_link_mode_to_legacy_u32(&advertising,
1904						cmd->link_modes.advertising);
1905
1906	/* We do not support gigabit. */
1907	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1908		if (advertising &
1909		    (ADVERTISED_1000baseT_Half |
1910		     ADVERTISED_1000baseT_Full))
1911			return -EINVAL;
1912	} else if ((speed != SPEED_100 &&
1913		    speed != SPEED_10) ||
1914		   (cmd->base.duplex != DUPLEX_HALF &&
1915		    cmd->base.duplex != DUPLEX_FULL)) {
1916		return -EINVAL;
1917	}
1918
1919	spin_lock_irq(&bp->lock);
1920
1921	if (cmd->base.autoneg == AUTONEG_ENABLE) {
1922		bp->flags &= ~(B44_FLAG_FORCE_LINK |
1923			       B44_FLAG_100_BASE_T |
1924			       B44_FLAG_FULL_DUPLEX |
1925			       B44_FLAG_ADV_10HALF |
1926			       B44_FLAG_ADV_10FULL |
1927			       B44_FLAG_ADV_100HALF |
1928			       B44_FLAG_ADV_100FULL);
1929		if (advertising == 0) {
1930			bp->flags |= (B44_FLAG_ADV_10HALF |
1931				      B44_FLAG_ADV_10FULL |
1932				      B44_FLAG_ADV_100HALF |
1933				      B44_FLAG_ADV_100FULL);
1934		} else {
1935			if (advertising & ADVERTISED_10baseT_Half)
1936				bp->flags |= B44_FLAG_ADV_10HALF;
1937			if (advertising & ADVERTISED_10baseT_Full)
1938				bp->flags |= B44_FLAG_ADV_10FULL;
1939			if (advertising & ADVERTISED_100baseT_Half)
1940				bp->flags |= B44_FLAG_ADV_100HALF;
1941			if (advertising & ADVERTISED_100baseT_Full)
1942				bp->flags |= B44_FLAG_ADV_100FULL;
1943		}
1944	} else {
1945		bp->flags |= B44_FLAG_FORCE_LINK;
1946		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1947		if (speed == SPEED_100)
1948			bp->flags |= B44_FLAG_100_BASE_T;
1949		if (cmd->base.duplex == DUPLEX_FULL)
1950			bp->flags |= B44_FLAG_FULL_DUPLEX;
1951	}
1952
1953	if (netif_running(dev))
1954		b44_setup_phy(bp);
1955
1956	spin_unlock_irq(&bp->lock);
1957
1958	return 0;
1959}
1960
1961static void b44_get_ringparam(struct net_device *dev,
1962			      struct ethtool_ringparam *ering)
1963{
1964	struct b44 *bp = netdev_priv(dev);
1965
1966	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1967	ering->rx_pending = bp->rx_pending;
1968
1969	/* XXX ethtool lacks a tx_max_pending, oops... */
1970}
1971
1972static int b44_set_ringparam(struct net_device *dev,
1973			     struct ethtool_ringparam *ering)
1974{
1975	struct b44 *bp = netdev_priv(dev);
1976
1977	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1978	    (ering->rx_mini_pending != 0) ||
1979	    (ering->rx_jumbo_pending != 0) ||
1980	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
1981		return -EINVAL;
1982
1983	spin_lock_irq(&bp->lock);
1984
1985	bp->rx_pending = ering->rx_pending;
1986	bp->tx_pending = ering->tx_pending;
1987
1988	b44_halt(bp);
1989	b44_init_rings(bp);
1990	b44_init_hw(bp, B44_FULL_RESET);
1991	netif_wake_queue(bp->dev);
1992	spin_unlock_irq(&bp->lock);
1993
1994	b44_enable_ints(bp);
1995
1996	return 0;
1997}
1998
1999static void b44_get_pauseparam(struct net_device *dev,
2000				struct ethtool_pauseparam *epause)
2001{
2002	struct b44 *bp = netdev_priv(dev);
2003
2004	epause->autoneg =
2005		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2006	epause->rx_pause =
2007		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
2008	epause->tx_pause =
2009		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
2010}
2011
2012static int b44_set_pauseparam(struct net_device *dev,
2013				struct ethtool_pauseparam *epause)
2014{
2015	struct b44 *bp = netdev_priv(dev);
2016
2017	spin_lock_irq(&bp->lock);
2018	if (epause->autoneg)
2019		bp->flags |= B44_FLAG_PAUSE_AUTO;
2020	else
2021		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2022	if (epause->rx_pause)
2023		bp->flags |= B44_FLAG_RX_PAUSE;
2024	else
2025		bp->flags &= ~B44_FLAG_RX_PAUSE;
2026	if (epause->tx_pause)
2027		bp->flags |= B44_FLAG_TX_PAUSE;
2028	else
2029		bp->flags &= ~B44_FLAG_TX_PAUSE;
2030	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2031		b44_halt(bp);
2032		b44_init_rings(bp);
2033		b44_init_hw(bp, B44_FULL_RESET);
2034	} else {
2035		__b44_set_flow_ctrl(bp, bp->flags);
2036	}
2037	spin_unlock_irq(&bp->lock);
2038
2039	b44_enable_ints(bp);
2040
2041	return 0;
2042}
2043
2044static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2045{
2046	switch (stringset) {
2047	case ETH_SS_STATS:
2048		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2049		break;
2050	}
2051}
2052
2053static int b44_get_sset_count(struct net_device *dev, int sset)
2054{
2055	switch (sset) {
2056	case ETH_SS_STATS:
2057		return ARRAY_SIZE(b44_gstrings);
2058	default:
2059		return -EOPNOTSUPP;
2060	}
2061}
2062
2063static void b44_get_ethtool_stats(struct net_device *dev,
2064				  struct ethtool_stats *stats, u64 *data)
2065{
2066	struct b44 *bp = netdev_priv(dev);
2067	struct b44_hw_stats *hwstat = &bp->hw_stats;
2068	u64 *data_src, *data_dst;
2069	unsigned int start;
2070	u32 i;
2071
2072	spin_lock_irq(&bp->lock);
2073	b44_stats_update(bp);
2074	spin_unlock_irq(&bp->lock);
2075
2076	do {
2077		data_src = &hwstat->tx_good_octets;
2078		data_dst = data;
2079		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
2080
2081		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2082			*data_dst++ = *data_src++;
2083
2084	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
2085}
2086
2087static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2088{
2089	struct b44 *bp = netdev_priv(dev);
2090
2091	wol->supported = WAKE_MAGIC;
2092	if (bp->flags & B44_FLAG_WOL_ENABLE)
2093		wol->wolopts = WAKE_MAGIC;
2094	else
2095		wol->wolopts = 0;
2096	memset(&wol->sopass, 0, sizeof(wol->sopass));
2097}
2098
2099static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2100{
2101	struct b44 *bp = netdev_priv(dev);
2102
2103	spin_lock_irq(&bp->lock);
2104	if (wol->wolopts & WAKE_MAGIC)
2105		bp->flags |= B44_FLAG_WOL_ENABLE;
2106	else
2107		bp->flags &= ~B44_FLAG_WOL_ENABLE;
2108	spin_unlock_irq(&bp->lock);
2109
2110	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2111	return 0;
2112}
2113
2114static const struct ethtool_ops b44_ethtool_ops = {
2115	.get_drvinfo		= b44_get_drvinfo,
2116	.nway_reset		= b44_nway_reset,
2117	.get_link		= ethtool_op_get_link,
2118	.get_wol		= b44_get_wol,
2119	.set_wol		= b44_set_wol,
2120	.get_ringparam		= b44_get_ringparam,
2121	.set_ringparam		= b44_set_ringparam,
2122	.get_pauseparam		= b44_get_pauseparam,
2123	.set_pauseparam		= b44_set_pauseparam,
2124	.get_msglevel		= b44_get_msglevel,
2125	.set_msglevel		= b44_set_msglevel,
2126	.get_strings		= b44_get_strings,
2127	.get_sset_count		= b44_get_sset_count,
2128	.get_ethtool_stats	= b44_get_ethtool_stats,
2129	.get_link_ksettings	= b44_get_link_ksettings,
2130	.set_link_ksettings	= b44_set_link_ksettings,
2131};
2132
2133static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2134{
2135	struct b44 *bp = netdev_priv(dev);
2136	int err = -EINVAL;
2137
2138	if (!netif_running(dev))
2139		goto out;
2140
2141	spin_lock_irq(&bp->lock);
2142	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2143		BUG_ON(!dev->phydev);
2144		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2145	} else {
2146		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2147	}
2148	spin_unlock_irq(&bp->lock);
2149out:
2150	return err;
2151}
2152
2153static int b44_get_invariants(struct b44 *bp)
2154{
2155	struct ssb_device *sdev = bp->sdev;
2156	int err = 0;
2157	u8 *addr;
2158
2159	bp->dma_offset = ssb_dma_translation(sdev);
2160
2161	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2162	    instance > 1) {
2163		addr = sdev->bus->sprom.et1mac;
2164		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2165	} else {
2166		addr = sdev->bus->sprom.et0mac;
2167		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2168	}
2169	/* Some ROMs have buggy PHY addresses with the high
2170	 * bits set (sign extension?). Truncate them to a
2171	 * valid PHY address. */
2172	bp->phy_addr &= 0x1F;
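	/* For example, a sign-extended SPROM value of 0xFFEE becomes 0x0E,
	 * keeping the address within the 5-bit (0-31) MDIO range.
	 */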
2173
2174	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2175
2176	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2177		pr_err("Invalid MAC address found in EEPROM\n");
2178		return -EINVAL;
2179	}
2180
2181	bp->imask = IMASK_DEF;
2182
2183	/* XXX - really required?
2184	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
2185	*/
2186
2187	if (bp->sdev->id.revision >= 7)
2188		bp->flags |= B44_FLAG_B0_ANDLATER;
2189
2190	return err;
2191}
2192
2193static const struct net_device_ops b44_netdev_ops = {
2194	.ndo_open		= b44_open,
2195	.ndo_stop		= b44_close,
2196	.ndo_start_xmit		= b44_start_xmit,
2197	.ndo_get_stats64	= b44_get_stats64,
2198	.ndo_set_rx_mode	= b44_set_rx_mode,
2199	.ndo_set_mac_address	= b44_set_mac_addr,
2200	.ndo_validate_addr	= eth_validate_addr,
2201	.ndo_do_ioctl		= b44_ioctl,
2202	.ndo_tx_timeout		= b44_tx_timeout,
2203	.ndo_change_mtu		= b44_change_mtu,
2204#ifdef CONFIG_NET_POLL_CONTROLLER
2205	.ndo_poll_controller	= b44_poll_controller,
2206#endif
2207};
2208
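/* phylib link-change callback for the external-PHY case: mirror the
 * negotiated duplex into the MAC's TX_CTRL register and log the change.
 */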
2209static void b44_adjust_link(struct net_device *dev)
2210{
2211	struct b44 *bp = netdev_priv(dev);
2212	struct phy_device *phydev = dev->phydev;
2213	bool status_changed = false;
2214
2215	BUG_ON(!phydev);
2216
2217	if (bp->old_link != phydev->link) {
2218		status_changed = true;
2219		bp->old_link = phydev->link;
2220	}
2221
2222	/* reflect duplex change */
2223	if (phydev->link) {
2224		if ((phydev->duplex == DUPLEX_HALF) &&
2225		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2226			status_changed = true;
2227			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2228		} else if ((phydev->duplex == DUPLEX_FULL) &&
2229			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2230			status_changed = true;
2231			bp->flags |= B44_FLAG_FULL_DUPLEX;
2232		}
2233	}
2234
2235	if (status_changed) {
2236		u32 val = br32(bp, B44_TX_CTRL);
2237		if (bp->flags & B44_FLAG_FULL_DUPLEX)
2238			val |= TX_CTRL_DUPLEX;
2239		else
2240			val &= ~TX_CTRL_DUPLEX;
2241		bw32(bp, B44_TX_CTRL, val);
2242		phy_print_status(phydev);
2243	}
2244}
2245
2246static int b44_register_phy_one(struct b44 *bp)
2247{
2248	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2249	struct mii_bus *mii_bus;
2250	struct ssb_device *sdev = bp->sdev;
2251	struct phy_device *phydev;
2252	char bus_id[MII_BUS_ID_SIZE + 3];
2253	struct ssb_sprom *sprom = &sdev->bus->sprom;
2254	int err;
2255
2256	mii_bus = mdiobus_alloc();
2257	if (!mii_bus) {
2258		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
2259		err = -ENOMEM;
2260		goto err_out;
2261	}
2262
2263	mii_bus->priv = bp;
2264	mii_bus->read = b44_mdio_read_phylib;
2265	mii_bus->write = b44_mdio_write_phylib;
2266	mii_bus->name = "b44_eth_mii";
2267	mii_bus->parent = sdev->dev;
2268	mii_bus->phy_mask = ~(1 << bp->phy_addr);
2269	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);
2270
2271	bp->mii_bus = mii_bus;
2272
2273	err = mdiobus_register(mii_bus);
2274	if (err) {
2275		dev_err(sdev->dev, "failed to register MII bus\n");
2276		goto err_out_mdiobus;
2277	}
2278
2279	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
2280	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
2281
2282		dev_info(sdev->dev,
2283			 "could not find PHY at %i, use fixed one\n",
2284			 bp->phy_addr);
2285
2286		bp->phy_addr = 0;
2287		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
2288			 bp->phy_addr);
2289	} else {
2290		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
2291			 bp->phy_addr);
2292	}
2293
2294	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
2295			     PHY_INTERFACE_MODE_MII);
2296	if (IS_ERR(phydev)) {
2297		dev_err(sdev->dev, "could not attach PHY at %i\n",
2298			bp->phy_addr);
2299		err = PTR_ERR(phydev);
2300		goto err_out_mdiobus_unregister;
2301	}
2302
2303	/* mask with MAC supported features */
2304	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
2305	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
2306	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
2307	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
2308	linkmode_and(phydev->supported, phydev->supported, mask);
2309	linkmode_copy(phydev->advertising, phydev->supported);
2310
2311	bp->old_link = 0;
2312	bp->phy_addr = phydev->mdio.addr;
2313
2314	phy_attached_info(phydev);
2315
2316	return 0;
2317
2318err_out_mdiobus_unregister:
2319	mdiobus_unregister(mii_bus);
2320
2321err_out_mdiobus:
2322	mdiobus_free(mii_bus);
2323
2324err_out:
2325	return err;
2326}
2327
2328static void b44_unregister_phy_one(struct b44 *bp)
2329{
2330	struct net_device *dev = bp->dev;
2331	struct mii_bus *mii_bus = bp->mii_bus;
2332
2333	phy_disconnect(dev->phydev);
2334	mdiobus_unregister(mii_bus);
2335	mdiobus_free(mii_bus);
2336}
2337
2338static int b44_init_one(struct ssb_device *sdev,
2339			const struct ssb_device_id *ent)
2340{
2341	struct net_device *dev;
2342	struct b44 *bp;
2343	int err;
2344
2345	instance++;
2346
2347	dev = alloc_etherdev(sizeof(*bp));
2348	if (!dev) {
2349		err = -ENOMEM;
2350		goto out;
2351	}
2352
2353	SET_NETDEV_DEV(dev, sdev->dev);
2354
2355	/* No interesting netdevice features in this card... */
2356	dev->features |= 0;
2357
2358	bp = netdev_priv(dev);
2359	bp->sdev = sdev;
2360	bp->dev = dev;
2361	bp->force_copybreak = 0;
2362
2363	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2364
2365	spin_lock_init(&bp->lock);
2366	u64_stats_init(&bp->hw_stats.syncp);
2367
2368	bp->rx_pending = B44_DEF_RX_RING_PENDING;
2369	bp->tx_pending = B44_DEF_TX_RING_PENDING;
2370
2371	dev->netdev_ops = &b44_netdev_ops;
2372	netif_napi_add(dev, &bp->napi, b44_poll, 64);
2373	dev->watchdog_timeo = B44_TX_TIMEOUT;
2374	dev->min_mtu = B44_MIN_MTU;
2375	dev->max_mtu = B44_MAX_MTU;
2376	dev->irq = sdev->irq;
2377	dev->ethtool_ops = &b44_ethtool_ops;
2378
2379	err = ssb_bus_powerup(sdev->bus, 0);
2380	if (err) {
2381		dev_err(sdev->dev,
2382			"Failed to powerup the bus\n");
2383		goto err_out_free_dev;
2384	}
2385
2386	if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2387		dev_err(sdev->dev,
2388			"Required 30BIT DMA mask unsupported by the system\n");
2389		goto err_out_powerdown;
2390	}
2391
2392	err = b44_get_invariants(bp);
2393	if (err) {
2394		dev_err(sdev->dev,
2395			"Problem fetching invariants of chip, aborting\n");
2396		goto err_out_powerdown;
2397	}
2398
2399	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2400		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2401		err = -ENODEV;
2402		goto err_out_powerdown;
2403	}
2404
2405	bp->mii_if.dev = dev;
2406	bp->mii_if.mdio_read = b44_mdio_read_mii;
2407	bp->mii_if.mdio_write = b44_mdio_write_mii;
2408	bp->mii_if.phy_id = bp->phy_addr;
2409	bp->mii_if.phy_id_mask = 0x1f;
2410	bp->mii_if.reg_num_mask = 0x1f;
2411
2412	/* By default, advertise all speed/duplex settings. */
2413	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2414		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2415
2416	/* By default, auto-negotiate PAUSE. */
2417	bp->flags |= B44_FLAG_PAUSE_AUTO;
2418
2419	err = register_netdev(dev);
2420	if (err) {
2421		dev_err(sdev->dev, "Cannot register net device, aborting\n");
2422		goto err_out_powerdown;
2423	}
2424
2425	netif_carrier_off(dev);
2426
2427	ssb_set_drvdata(sdev, dev);
2428
2429	/* Chip reset provides power to the b44 MAC & PCI cores, which
2430	 * is necessary for MAC register access.
2431	 */
2432	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2433
2434	/* do a phy reset to test if there is an active phy */
2435	err = b44_phy_reset(bp);
2436	if (err < 0) {
2437		dev_err(sdev->dev, "phy reset failed\n");
2438		goto err_out_unregister_netdev;
2439	}
2440
2441	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2442		err = b44_register_phy_one(bp);
2443		if (err) {
2444			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2445			goto err_out_unregister_netdev;
2446		}
2447	}
2448
2449	device_set_wakeup_capable(sdev->dev, true);
2450	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2451
2452	return 0;
2453
2454err_out_unregister_netdev:
2455	unregister_netdev(dev);
2456err_out_powerdown:
2457	ssb_bus_may_powerdown(sdev->bus);
2458
2459err_out_free_dev:
2460	netif_napi_del(&bp->napi);
2461	free_netdev(dev);
2462
2463out:
2464	return err;
2465}
2466
2467static void b44_remove_one(struct ssb_device *sdev)
2468{
2469	struct net_device *dev = ssb_get_drvdata(sdev);
2470	struct b44 *bp = netdev_priv(dev);
2471
2472	unregister_netdev(dev);
2473	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
2474		b44_unregister_phy_one(bp);
2475	ssb_device_disable(sdev, 0);
2476	ssb_bus_may_powerdown(sdev->bus);
2477	netif_napi_del(&bp->napi);
2478	free_netdev(dev);
2479	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2480	ssb_set_drvdata(sdev, NULL);
2481}
2482
2483static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2484{
2485	struct net_device *dev = ssb_get_drvdata(sdev);
2486	struct b44 *bp = netdev_priv(dev);
2487
2488	if (!netif_running(dev))
2489		return 0;
2490
2491	del_timer_sync(&bp->timer);
2492
2493	spin_lock_irq(&bp->lock);
2494
2495	b44_halt(bp);
2496	netif_carrier_off(bp->dev);
2497	netif_device_detach(bp->dev);
2498	b44_free_rings(bp);
2499
2500	spin_unlock_irq(&bp->lock);
2501
2502	free_irq(dev->irq, dev);
2503	if (bp->flags & B44_FLAG_WOL_ENABLE) {
2504		b44_init_hw(bp, B44_PARTIAL_RESET);
2505		b44_setup_wol(bp);
2506	}
2507
2508	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2509	return 0;
2510}
2511
2512static int b44_resume(struct ssb_device *sdev)
2513{
2514	struct net_device *dev = ssb_get_drvdata(sdev);
2515	struct b44 *bp = netdev_priv(dev);
2516	int rc = 0;
2517
2518	rc = ssb_bus_powerup(sdev->bus, 0);
2519	if (rc) {
2520		dev_err(sdev->dev,
2521			"Failed to powerup the bus\n");
2522		return rc;
2523	}
2524
2525	if (!netif_running(dev))
2526		return 0;
2527
2528	spin_lock_irq(&bp->lock);
2529	b44_init_rings(bp);
2530	b44_init_hw(bp, B44_FULL_RESET);
2531	spin_unlock_irq(&bp->lock);
2532
2533	/*
2534	 * As a shared interrupt, the handler can be called immediately. To be
2535	 * able to check the interrupt status the hardware must already be
2536	 * powered back on (b44_init_hw).
2537	 */
2538	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2539	if (rc) {
2540		netdev_err(dev, "request_irq failed\n");
2541		spin_lock_irq(&bp->lock);
2542		b44_halt(bp);
2543		b44_free_rings(bp);
2544		spin_unlock_irq(&bp->lock);
2545		return rc;
2546	}
2547
2548	netif_device_attach(bp->dev);
2549
2550	b44_enable_ints(bp);
2551	netif_wake_queue(dev);
2552
2553	mod_timer(&bp->timer, jiffies + 1);
2554
2555	return 0;
2556}
2557
2558static struct ssb_driver b44_ssb_driver = {
2559	.name		= DRV_MODULE_NAME,
2560	.id_table	= b44_ssb_tbl,
2561	.probe		= b44_init_one,
2562	.remove		= b44_remove_one,
2563	.suspend	= b44_suspend,
2564	.resume		= b44_resume,
2565};
2566
2567static inline int __init b44_pci_init(void)
2568{
2569	int err = 0;
2570#ifdef CONFIG_B44_PCI
2571	err = ssb_pcihost_register(&b44_pci_driver);
2572#endif
2573	return err;
2574}
2575
2576static inline void b44_pci_exit(void)
2577{
2578#ifdef CONFIG_B44_PCI
2579	ssb_pcihost_unregister(&b44_pci_driver);
2580#endif
2581}
2582
2583static int __init b44_init(void)
2584{
2585	unsigned int dma_desc_align_size = dma_get_cache_alignment();
2586	int err;
2587
2588	/* Set up parameters for syncing RX/TX DMA descriptors */
2589	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
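	/* Descriptors that are synced individually (the ring-hack path) are
	 * synced in units of at least one cache line, since cache maintenance
	 * on non-coherent systems works at cacheline granularity.
	 */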
2590
2591	err = b44_pci_init();
2592	if (err)
2593		return err;
2594	err = ssb_driver_register(&b44_ssb_driver);
2595	if (err)
2596		b44_pci_exit();
2597	return err;
2598}
2599
2600static void __exit b44_cleanup(void)
2601{
2602	ssb_driver_unregister(&b44_ssb_driver);
2603	b44_pci_exit();
2604}
2605
2606module_init(b44_init);
2607module_exit(b44_cleanup);
2608